"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def A_ ( ):
'''simple docstring'''
snake_case_, snake_case_ :Tuple = 9, 14 # noqa: F841
snake_case_ :Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
snake_case_ :Any = defaultdict(_lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
snake_case_ :Union[str, Any] = mst(_lowercase )
snake_case_ :List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
snake_case_ :Optional[Any] = tuple(answer[:2] )
snake_case_ :Dict = tuple(edge[::-1] )
assert edge in result or reverse in result
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__a = logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case_ :List[str] = 4
snake_case_ :Tuple = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :str = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Union[str, Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (3, 32, 32)
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
snake_case_ :Tuple = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[str] = UNetaDModel
_A : Union[str, Any] = """sample"""
@property
def lowerCAmelCase_ ( self: str ) -> str:
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 4
snake_case_ :int = (32, 32)
snake_case_ :Any = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return (4, 32, 32)
@property
def lowerCAmelCase_ ( self: List[Any] ) -> int:
return (4, 32, 32)
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
snake_case_ :Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
snake_case_ :List[str] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: Tuple ) -> Dict:
snake_case_, snake_case_ :Union[str, Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model.to(snake_case )
snake_case_ :Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def lowerCAmelCase_ ( self: str ) -> Any:
# by defautl model loading will use accelerate as `low_cpu_mem_usage=True`
snake_case_, snake_case_ :List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case )
model_accelerate.to(snake_case )
model_accelerate.eval()
snake_case_ :List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :str = torch.tensor([10] * noise.shape[0] ).to(snake_case )
snake_case_ :Optional[int] = model_accelerate(snake_case , snake_case )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_, snake_case_ :str = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case )
model_normal_load.to(snake_case )
model_normal_load.eval()
snake_case_ :int = model_normal_load(snake_case , snake_case )["""sample"""]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_ :Tuple = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(snake_case )
snake_case_ :Optional[int] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case_ :int = noise.to(snake_case )
snake_case_ :List[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
snake_case_ :Union[str, Any] = model(snake_case , snake_case ).sample
snake_case_ :Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ :Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = UNetaDModel
_A : List[Any] = """sample"""
@property
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: int=(32, 32) ) -> Tuple:
snake_case_ :Union[str, Any] = 4
snake_case_ :Any = 3
snake_case_ :int = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
return (3, 32, 32)
@property
def lowerCAmelCase_ ( self: int ) -> Tuple:
return (3, 32, 32)
def lowerCAmelCase_ ( self: List[str] ) -> Tuple:
snake_case_ :List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
snake_case_ :int = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_, snake_case_ :List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(snake_case )
snake_case_ :Any = self.dummy_input
snake_case_ :int = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
snake_case_ :int = noise
snake_case_ :int = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase_ ( self: str ) -> Dict:
snake_case_ :Dict = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(snake_case )
snake_case_ :List[str] = 4
snake_case_ :Optional[int] = 3
snake_case_ :List[str] = (256, 256)
snake_case_ :Tuple = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :Dict = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
snake_case_ :Optional[Any] = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(snake_case )
snake_case_ :Optional[int] = 4
snake_case_ :Optional[Any] = 3
snake_case_ :Optional[Any] = (32, 32)
snake_case_ :Dict = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
snake_case_ :Any = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
snake_case_ :str = model(snake_case , snake_case ).sample
snake_case_ :int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ :int = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[Any]:
# not required for this model
pass
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *__A : Optional[int] , **__A : Dict ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(_UpperCAmelCase )
def __call__( self : Tuple , __A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__A : Tuple ):
return super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
def _lowercase ( self : Union[str, Any] , **__A : Tuple ):
return {}, {}, {}
def _lowercase ( self : Tuple , __A : int ):
snake_case__ : List[str] = load_image(_UpperCAmelCase )
snake_case__ : List[Any] = image.size
snake_case__ : Optional[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def _lowercase ( self : List[Any] , __A : Tuple ):
snake_case__ : Optional[Any] = self.model(**_UpperCAmelCase )
return model_outputs
def _lowercase ( self : Optional[Any] , __A : List[str] ):
snake_case__ : Tuple = model_outputs.predicted_depth
snake_case__ : Any = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=_UpperCAmelCase )
snake_case__ : List[Any] = prediction.squeeze().cpu().numpy()
snake_case__ : List[Any] = (output * 2_5_5 / np.max(_UpperCAmelCase )).astype("uint8" )
snake_case__ : List[Any] = Image.fromarray(_UpperCAmelCase )
snake_case__ : Optional[int] = {}
snake_case__ : Optional[Any] = predicted_depth
snake_case__ : int = depth
return output_dict
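# A minimal usage sketch (not part of the original module): DepthEstimationPipeline
# is normally reached through `transformers.pipeline`; the image URL below is an
# illustrative placeholder.
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")       # PIL image assembled in postprocess()
#     print(result["predicted_depth"].shape)  # raw torch tensor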
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
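# Usage sketch for the decorator above (illustrative, not from the original file):
# with `do_eager_mode=True` the wrapped callable runs as plain Python, otherwise it
# is traced into a `tf.function` graph and optionally XLA-compiled.
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)  # `model`/`input_ids` assumed in scope
#
#     forward()  # the first call traces and compiles; later calls reuse the graph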
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run 5 additional times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat,
                # min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return -1 when `number` has an odd count of prime factors (with multiplicity), else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
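# A few hand-checked values for the function above, assuming `prime_factors`
# returns the prime factors of its argument with multiplicity:
#
#     >>> liouville_lambda(1)   # no prime factors, even count
#     1
#     >>> liouville_lambda(2)   # [2]
#     -1
#     >>> liouville_lambda(12)  # [2, 2, 3]
#     -1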
import pytest

from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs


@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
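# Usage sketch (illustrative; assumes this module lives at `transformers.utils.logging`):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()            # same effect as set_verbosity(logging.INFO)
#     logger = logging.get_logger(__name__)
#     logger.info("visible at INFO level")
#     logging.disable_progress_bar()          # routes tqdm calls to EmptyTqdm above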
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
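# Worked example for the two entry points above: a 5-element list is shorter than
# `precision`, so both calls immediately fall back to `lin_search`:
#
#     >>> ite_ternary_search([1, 3, 5, 7, 9], 7)
#     3
#     >>> rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7)
#     3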
"""Convert PoolFormer checkpoints from the original repository."""

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Prepare a COCO image on which we will verify our results."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
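# Example invocation (the script filename and checkpoint path are placeholders;
# the flags match the argparse definition above):
#
#     python convert_poolformer_original_to_pytorch.py \
#         --model_name poolformer_s12 \
#         --checkpoint_path /path/to/poolformer_s12.pth.tar \
#         --pytorch_dump_folder_path ./poolformer_s12_hf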
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( snake_case_):
_SCREAMING_SNAKE_CASE : Dict = DistilBertTokenizer
_SCREAMING_SNAKE_CASE : Dict = DistilBertTokenizerFast
_SCREAMING_SNAKE_CASE : List[Any] = True
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase__ = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
_A = ""
_A = ""
_A = ""
_A = ""
def a__ ( lowerCAmelCase ) -> None:
# authorize twitter, initialize tweepy
UpperCAmelCase__ : List[str] = tweepy.OAuthHandler(lowerCAmelCase , lowerCAmelCase )
auth.set_access_token(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[str] = tweepy.API(lowerCAmelCase )
# initialize a list to hold all the tweepy Tweets
UpperCAmelCase__ : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
UpperCAmelCase__ : int = api.user_timeline(screen_name=lowerCAmelCase , count=2_00 )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# save the id of the oldest tweet less one
UpperCAmelCase__ : List[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCAmelCase ) > 0:
print(F"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
UpperCAmelCase__ : Any = api.user_timeline(
screen_name=lowerCAmelCase , count=2_00 , max_id=lowerCAmelCase )
# save most recent tweets
alltweets.extend(lowerCAmelCase )
# update the id of the oldest tweet less one
UpperCAmelCase__ : Tuple = alltweets[-1].id - 1
print(F"""...{len(lowerCAmelCase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
UpperCAmelCase__ : Optional[int] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"""new_{screen_name}_tweets.csv""" , """w""" ) as f:
UpperCAmelCase__ : int = csv.writer(lowerCAmelCase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowerCAmelCase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __UpperCAmelCase ( lowerCamelCase__ ):
def __get__( self : Tuple, __A : Optional[Any], __A : Optional[int]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCAmelCase : str = '''__cached_''' + self.fget.__name__
UpperCAmelCase : int = getattr(__A, __A, __A )
if cached is None:
UpperCAmelCase : Any = self.fget(__A )
setattr(__A, __A, __A )
return cached
def a__ ( UpperCAmelCase : Optional[Any] ) -> Any:
UpperCAmelCase : Any = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
if is_torch_fx_proxy(UpperCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(UpperCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(UpperCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Union[str, Any]:
return isinstance(UpperCAmelCase , np.ndarray )
def a__ ( UpperCAmelCase : str ) -> Tuple:
return _is_numpy(UpperCAmelCase )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
import torch
return isinstance(UpperCAmelCase , torch.Tensor )
def a__ ( UpperCAmelCase : str ) -> List[Any]:
return False if not is_torch_available() else _is_torch(UpperCAmelCase )
def a__ ( UpperCAmelCase : Tuple ) -> List[str]:
import torch
return isinstance(UpperCAmelCase , torch.device )
def a__ ( UpperCAmelCase : Any ) -> Any:
return False if not is_torch_available() else _is_torch_device(UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict ) -> List[str]:
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ):
if hasattr(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
else:
return False
return isinstance(UpperCAmelCase , torch.dtype )
def a__ ( UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(UpperCAmelCase )
def a__ ( UpperCAmelCase : Any ) -> str:
import tensorflow as tf
return isinstance(UpperCAmelCase , tf.Tensor )
def a__ ( UpperCAmelCase : int ) -> Union[str, Any]:
return False if not is_tf_available() else _is_tensorflow(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[str] ) -> Tuple:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(UpperCAmelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(UpperCAmelCase )
return type(UpperCAmelCase ) == tf.Tensor
def a__ ( UpperCAmelCase : int ) -> List[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(UpperCAmelCase , jnp.ndarray )
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
return False if not is_flax_available() else _is_jax(UpperCAmelCase )
def a__ ( UpperCAmelCase : int ) -> Tuple:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return [to_py_obj(UpperCAmelCase ) for o in obj]
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase ).tolist()
elif isinstance(UpperCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( UpperCAmelCase : Any ) -> List[str]:
if isinstance(UpperCAmelCase , (dict, UserDict) ):
return {k: to_numpy(UpperCAmelCase ) for k, v in obj.items()}
elif isinstance(UpperCAmelCase , (list, tuple) ):
return np.array(UpperCAmelCase )
elif is_tf_tensor(UpperCAmelCase ):
return obj.numpy()
elif is_torch_tensor(UpperCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(UpperCAmelCase ):
return np.asarray(UpperCAmelCase )
else:
return obj
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : Optional[int] ):
UpperCAmelCase : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(__A ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase : int = getattr(self, class_fields[0].name )
UpperCAmelCase : str = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__A ):
if isinstance(__A, __A ):
UpperCAmelCase : Tuple = first_field.items()
UpperCAmelCase : Any = True
else:
try:
UpperCAmelCase : Optional[Any] = iter(__A )
UpperCAmelCase : Optional[Any] = True
except TypeError:
UpperCAmelCase : Optional[int] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__A ):
if (
not isinstance(__A, (list, tuple) )
or not len(__A ) == 2
or not isinstance(element[0], __A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
UpperCAmelCase : Union[str, Any] = element[1]
elif first_field is not None:
UpperCAmelCase : Union[str, Any] = first_field
else:
for field in class_fields:
UpperCAmelCase : Optional[Any] = getattr(self, field.name )
if v is not None:
UpperCAmelCase : Optional[int] = v
def __delitem__( self : Union[str, Any], *__A : str, **__A : Tuple ):
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : List[str], *__A : Union[str, Any], **__A : Optional[Any] ):
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Any, *__A : Dict, **__A : str ):
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __magic_name__ ( self : Dict, *__A : int, **__A : Dict ):
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str], __A : List[str] ):
if isinstance(__A, __A ):
UpperCAmelCase : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any], __A : Dict, __A : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(__A, __A )
super().__setattr__(__A, __A )
def __setitem__( self : Dict, __A : List[Any], __A : Union[str, Any] ):
# Will raise a KeyException if needed
super().__setitem__(__A, __A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(__A, __A )
def __magic_name__ ( self : List[str] ):
return tuple(self[k] for k in self.keys() )
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@classmethod
def __magic_name__ ( cls : List[Any], __A : Tuple ):
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """longest"""
UpperCamelCase = """max_length"""
UpperCamelCase = """do_not_pad"""
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """pt"""
UpperCamelCase = """tf"""
UpperCamelCase = """np"""
UpperCamelCase = """jax"""
class __UpperCAmelCase :
def __init__( self : Any, __A : List[ContextManager] ):
UpperCAmelCase : Tuple = context_managers
UpperCAmelCase : Tuple = ExitStack()
def __enter__( self : Any ):
for context_manager in self.context_managers:
self.stack.enter_context(__A )
def __exit__( self : List[Any], *__A : Union[str, Any], **__A : Dict ):
self.stack.__exit__(*__A, **__A )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> str:
UpperCAmelCase : int = infer_framework(UpperCAmelCase )
if framework == "tf":
UpperCAmelCase : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase : List[Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
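# Illustrative example (an addition): a nested config flattens to
# dot-separated keys.
nested = {"model": {"hidden_size": 32, "heads": {"num": 4}}, "seed": 0}
assert flatten_dict(nested) == {"model.hidden_size": 32, "model.heads.num": 4, "seed": 0}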
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy arrays and PyTorch, TensorFlow and Jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for numpy arrays and PyTorch, TensorFlow and Jax tensors."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for numpy arrays and PyTorch, TensorFlow and Jax tensors."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for numpy arrays and PyTorch, TensorFlow and Jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for numpy arrays and PyTorch, TensorFlow and Jax tensors."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
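# Illustrative check (an addition): the helpers dispatch on the tensor type,
# so the same calls work for NumPy, PyTorch, TensorFlow, and JAX inputs.
# With NumPy:
x = np.zeros((2, 1, 3))
assert transpose(x).shape == (3, 1, 2)
assert reshape(x, (6,)).shape == (6,)
assert squeeze(x, axis=1).shape == (2, 3)
assert expand_dims(x, 0).shape == (1, 2, 1, 3)
assert tensor_size(x) == 6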
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework(model_class):
    """
    Infers the framework of a given model class without using isinstance(), since the relevant classes may not be
    importable. The raise is deliberately outside the loop: only fail once the whole MRO has been inspected.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
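# Illustrative usage (an addition; assumes transformers and torch are
# installed): framework detection walks the MRO, so any subclass of
# PreTrainedModel resolves to "pt".
from transformers import BertForSequenceClassification

assert infer_framework(BertForSequenceClassification) == "pt"
assert find_labels(BertForSequenceClassification) == ["labels"]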
| 336 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generates a different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
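# End-to-end usage sketch of the pipeline exercised above (an addition;
# assumes a CUDA device and network access -- the checkpoint and image URL
# come from the tests).
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10).images[0]
edited.save("cyborg.png")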
| 75 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
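# Hedged usage sketch (an addition; assumes `__call__` is inherited from
# PipelineTool and chains setup -> encode -> forward -> decode; running this
# downloads the facebook/bart-large-mnli checkpoint):
classifier = TextClassificationTool()
print(classifier("This movie was fantastic!", ["positive", "negative"]))  # e.g. "positive"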
| 75 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 341 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 341 | 1 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Perform a depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect all vertices reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
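# Example run on the module-level test graphs (an addition): each inner list
# is one strongly connected component found by Kosaraju's algorithm.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]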
| 38 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
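def _gather_for_metrics_sketch():
    # Illustrative helper (an addition, not part of the original script): the
    # guarantee exercised by test_mrpc/test_torch_metrics is that
    # gather_for_metrics drops the samples duplicated to pad the final batch,
    # so gathered counts match len(dataset). Single-process sketch:
    from torch.utils.data import TensorDataset

    accelerator = Accelerator()
    dataloader = accelerator.prepare(DataLoader(TensorDataset(torch.arange(10.0)), batch_size=4))
    gathered = [accelerator.gather_for_metrics(batch) for (batch,) in dataloader]
    assert torch.cat(gathered).numel() == 10  # no padding duplicates survive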
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 38 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 2 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 2 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve is a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
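# Worked example (an addition): for the degree-1 curve through (1, 1) and
# (3, 3), the Bernstein weights at t = 0.5 are [0.5, 0.5], so the curve point
# is the midpoint (2.0, 2.0).
assert BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5) == (2.0, 2.0)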
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 363 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as a list of lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
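# Minimal usage sketch of the feature extractor exercised above (an addition;
# assumes the `transformers` speech extras, i.e. torchaudio, are installed;
# the audio here is illustrative random noise).
import numpy as np
from transformers import Speech2TextFeatureExtractor

fe = Speech2TextFeatureExtractor()  # default: 80 mel bins at 16 kHz
audio = np.random.randn(16_000).astype(np.float32)  # one second of fake audio
features = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, num_frames, 80)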
| 50 | 0 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
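# Quick checks (an addition): the validator requires exactly four numeric
# octets, each in the range this module accepts (0-254).
assert is_ip_v4_address_valid("192.168.0.23") is True
assert is_ip_v4_address_valid("256.192.3.121") is False  # octet out of range
assert is_ip_v4_address_valid("1.2.3") is False          # too few octets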
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = input().strip()
SCREAMING_SNAKE_CASE__ : Optional[int] = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F'{ip} is a {valid_or_invalid} IP v4 address.')
| 270 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback registering the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
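# Each evaluation pass fires one on_prediction_step per eval batch, followed by
# an on_log and then the on_evaluate event itself.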
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=UserWarning )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
| 270 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> str:
return "".join(sorted(SCREAMING_SNAKE_CASE__ ) )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list[str]:
return word_by_signature[signature(SCREAMING_SNAKE_CASE__ )]
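# Illustrative example: signature("pots") == signature("stop") == "opst", so
# both words land in the same bucket and are returned as each other's anagrams.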
lowercase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
lowercase : Dict = sorted({word.strip().lower() for word in data.splitlines()})
lowercase : Union[str, Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
lowercase : int = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 285 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __snake_case ( lowerCAmelCase ):
_a : Dict= "mobilenet_v1"
def __init__( self ,snake_case=3 ,snake_case=224 ,snake_case=1.0 ,snake_case=8 ,snake_case="relu6" ,snake_case=True ,snake_case=0.999 ,snake_case=0.02 ,snake_case=0.001 ,**snake_case ,):
'''simple docstring'''
super().__init__(**snake_case )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
lowercase : int = num_channels
lowercase : Union[str, Any] = image_size
lowercase : int = depth_multiplier
lowercase : Tuple = min_depth
lowercase : Dict = hidden_act
lowercase : Dict = tf_padding
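# tf_padding (above) toggles TensorFlow-style "SAME" padding on the convolution
# layers, per the upstream MobileNetV1Config documentation.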
lowercase : Dict = classifier_dropout_prob
lowercase : int = initializer_range
lowercase : List[str] = layer_norm_eps
class __snake_case ( lowerCAmelCase ):
_a : int= version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1e-4
| 285 | 1 |
'''simple docstring'''
def __magic_name__( lowerCamelCase, lowerCamelCase):
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __magic_name__( lowerCamelCase, lowerCamelCase=0):
return sorted(A__, key=lambda lowerCamelCase: x[column])
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=float('''inf''')):
for i in range(points_counts - 1):
for j in range(i + 1, A__):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
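# The strip search below relies on a packing argument: once the candidate points
# are sorted by y, each point only needs to be compared against a small constant
# number of neighbours (the window of 6 used here), which keeps the overall
# divide-and-conquer at O(n log n).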
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=float('''inf''')):
for i in range(min(6, points_counts - 1), A__):
for j in range(max(0, i - 6), A__):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j])
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
# base case
if points_counts <= 3:
return dis_between_closest_pair(A__, A__)
# recursion
__lowerCAmelCase = points_counts // 2
__lowerCAmelCase = closest_pair_of_points_sqr(
A__, points_sorted_on_y[:mid], A__)
__lowerCAmelCase = closest_pair_of_points_sqr(
A__, points_sorted_on_y[mid:], points_counts - mid)
__lowerCAmelCase = min(A__, A__)
__lowerCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
cross_strip.append(A__)
__lowerCAmelCase = dis_between_closest_in_strip(
A__, len(A__), A__)
return min(A__, A__)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = column_based_sort(A__, column=0)
__lowerCAmelCase = column_based_sort(A__, column=1)
return (
closest_pair_of_points_sqr(
A__, A__, A__)
) ** 0.5
if __name__ == "__main__":
_UpperCAmelCase : Dict = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 174 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class UpperCamelCase_ :
# setable values
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None # sigma(t_i)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]:
return cls()
@dataclass
class UpperCamelCase_ (__A ):
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
class UpperCamelCase_ (__A , __A ):
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return True
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
return KarrasVeSchedulerState.create()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState:
UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
UpperCAmelCase_ : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
UpperCAmelCase_ : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 )
UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma
UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
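# The step below follows Algorithm 2 of Karras et al. (2022): predict x_0 from
# the model output, form the ODE derivative (x - x_0) / sigma_hat, then take an
# Euler step from sigma_hat down to sigma_prev.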
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
UpperCAmelCase_ : str = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict:
raise NotImplementedError()
| 268 | 0 |
"""simple docstring"""
import requests
SCREAMING_SNAKE_CASE__ = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> None:
"""simple docstring"""
    snake_case = requests.get(SCREAMING_SNAKE_CASE__ + _UpperCamelCase ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(f"""{i}.) {article["title"]}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
lowerCAmelCase__("<Your BBC News API key goes here>")
| 149 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE__ = "hf-internal-testing/tiny-random-bert"
SCREAMING_SNAKE_CASE__ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
SCREAMING_SNAKE_CASE__ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(lowerCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(lowerCAmelCase , lowerCAmelCase ) ) )
with open(os.path.join(lowerCAmelCase , 'refs' , 'main' ) ) as f:
snake_case = f.read()
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , 'snapshots' , lowerCAmelCase , lowerCAmelCase ) )
self.assertTrue(os.path.isfile(lowerCAmelCase ) )
# File is cached at the same place the second time.
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# Using a specific revision to test the full commit hash.
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase , revision='9b8c223' )
self.assertEqual(lowerCAmelCase , os.path.join(lowerCAmelCase , 'snapshots' , lowerCAmelCase , lowerCAmelCase ) )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid model identifier' ):
snake_case = cached_file('tiny-random-bert' , lowerCAmelCase )
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid git identifier' ):
snake_case = cached_file(lowerCAmelCase , lowerCAmelCase , revision='aaaa' )
with self.assertRaisesRegex(lowerCAmelCase , 'does not appear to have a file named' ):
snake_case = cached_file(lowerCAmelCase , 'conf' )
def snake_case ( self ):
"""simple docstring"""
with self.assertRaisesRegex(lowerCAmelCase , 'does not appear to have a file named' ):
snake_case = cached_file(lowerCAmelCase , 'conf' )
with open(os.path.join(lowerCAmelCase , 'refs' , 'main' ) ) as f:
snake_case = f.read()
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase , '.no_exist' , lowerCAmelCase , 'conf' ) ) )
snake_case = cached_file(lowerCAmelCase , 'conf' , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
snake_case = cached_file(lowerCAmelCase , 'conf' , local_files_only=lowerCAmelCase , _raise_exceptions_for_missing_entries=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=lowerCAmelCase ) as mock_head:
snake_case = cached_file(lowerCAmelCase , 'conf' , _raise_exceptions_for_connection_errors=lowerCAmelCase )
self.assertIsNone(lowerCAmelCase )
# This checks that we did call the fake head request
mock_head.assert_called()
def snake_case ( self ):
"""simple docstring"""
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowerCAmelCase ) )
def snake_case ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , lowerCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(lowerCAmelCase , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , lowerCAmelCase , revision='ahaha' )
snake_case = get_file_from_repo('bert-base-cased' , lowerCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case = json.loads(open(lowerCAmelCase , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_68 )
def snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = Path(lowerCAmelCase ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(lowerCAmelCase , 'a.txt' ) , str(lowerCAmelCase ) )
self.assertIsNone(get_file_from_repo(lowerCAmelCase , 'b.txt' ) )
| 149 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase):
def __A (self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __A (self ) -> Optional[Any]:
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
_lowercase =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
_lowercase ='''xvjiarui/stable-diffusion-2-inpainting'''
_lowercase , _lowercase =FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase , safety_checker=UpperCAmelCase )
_lowercase ='''Face of a yellow cat, high resolution, sitting on a park bench'''
_lowercase =jax.random.PRNGKey(0 )
_lowercase =5_0
_lowercase =jax.device_count()
_lowercase =num_samples * [prompt]
_lowercase =num_samples * [init_image]
_lowercase =num_samples * [mask_image]
_lowercase , _lowercase , _lowercase =pipeline.prepare_inputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# shard inputs and rng
_lowercase =replicate(UpperCAmelCase )
_lowercase =jax.random.split(UpperCAmelCase , jax.device_count() )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =shard(UpperCAmelCase )
_lowercase =pipeline(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase )
_lowercase =output.images.reshape(UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
_lowercase =images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
_lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowercase =jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 5 |
UpperCAmelCase__ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCAmelCase_ ( ) -> None:
"""simple docstring"""
_lowercase ='''Morse code here!'''
print(__snake_case )
_lowercase =encrypt(__snake_case )
print(__snake_case )
_lowercase =decrypt(__snake_case )
print(__snake_case )
if __name__ == "__main__":
main()
| 5 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_UpperCamelCase = logging.getLogger(__name__)
_UpperCamelCase = tf.data.AUTOTUNE
def _a ( ):
"""simple docstring"""
UpperCAmelCase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=_snake_case , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=_snake_case , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=_snake_case , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=_snake_case , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=_snake_case , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=_snake_case , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=_snake_case , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=_snake_case , default=2**18 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=_snake_case , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=_snake_case , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=_snake_case , default=1E-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=_snake_case , default=1E-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=_snake_case , default=512 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=_snake_case , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=_snake_case , required=_snake_case , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=_snake_case , help="""Model ID to upload to on the Hugging Face Hub.""" )
UpperCAmelCase = parser.parse_args()
return args
def _a ( _snake_case ):
"""simple docstring"""
try:
if args.tpu_name:
UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(_snake_case )
tf.tpu.experimental.initialize_tpu_system(_snake_case )
return tpu
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = 0
for file in file_list:
UpperCAmelCase = file.split("""/""" )[-1]
UpperCAmelCase = re.search(R"""-\d+-(\d+)\.tfrecord""" , _snake_case ).group(1 )
UpperCAmelCase = int(_snake_case )
num_samples += sample_count
return num_samples
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None ):
"""simple docstring"""
UpperCAmelCase = count_samples(_snake_case )
UpperCAmelCase = tf.data.Dataset.from_tensor_slices(_snake_case )
if shuffle:
UpperCAmelCase = dataset.shuffle(len(_snake_case ) )
UpperCAmelCase = tf.data.TFRecordDataset(_snake_case , num_parallel_reads=_snake_case )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCAmelCase = dataset.apply(tf.data.experimental.assert_cardinality(_snake_case ) )
UpperCAmelCase = dataset.map(_snake_case , num_parallel_calls=_snake_case )
if shuffle:
assert shuffle_buffer_size is not None
UpperCAmelCase = dataset.shuffle(args.shuffle_buffer_size )
UpperCAmelCase = dataset.batch(_snake_case , drop_remainder=_snake_case )
UpperCAmelCase = dataset.map(_snake_case , num_parallel_calls=_snake_case )
UpperCAmelCase = dataset.prefetch(_snake_case )
return dataset
def _a ( _snake_case ):
"""simple docstring"""
if not args.no_tpu:
UpperCAmelCase = initialize_tpu(_snake_case )
UpperCAmelCase = tf.distribute.TPUStrategy(_snake_case )
else:
UpperCAmelCase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCAmelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCAmelCase = tokenizer.vocab_size
UpperCAmelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' )
UpperCAmelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' )
UpperCAmelCase = count_samples(_snake_case )
UpperCAmelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCAmelCase = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCAmelCase = TFAutoModelForMaskedLM.from_config(_snake_case )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCAmelCase , UpperCAmelCase = create_optimizer(
num_train_steps=_snake_case , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_snake_case , metrics=["""accuracy"""] )
def decode_fn(_snake_case ):
UpperCAmelCase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_snake_case , _snake_case )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCAmelCase = DataCollatorForLanguageModeling(
tokenizer=_snake_case , mlm_probability=args.mlm_probability , mlm=_snake_case , return_tensors="""tf""" )
def mask_with_collator(_snake_case ):
# TF really needs an isin() function
UpperCAmelCase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
UpperCAmelCase , UpperCAmelCase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(_snake_case ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_snake_case , )
return batch
UpperCAmelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCAmelCase = prepare_dataset(
_snake_case , decode_fn=_snake_case , mask_fn=_snake_case , batch_size=_snake_case , shuffle=_snake_case , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCAmelCase = prepare_dataset(
_snake_case , decode_fn=_snake_case , mask_fn=_snake_case , batch_size=_snake_case , shuffle=_snake_case , )
UpperCAmelCase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_snake_case ) )
model.fit(
_snake_case , validation_data=_snake_case , epochs=args.num_epochs , callbacks=_snake_case , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_UpperCamelCase = parse_args()
main(args)
| 234 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_UpperCamelCase = None
_UpperCamelCase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_UpperCamelCase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def _a ( _snake_case , _snake_case=1 , _snake_case=256 ):
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def _a ( _snake_case ):
"""simple docstring"""
with open(_snake_case , """r""" ) as f:
return json.load(_snake_case )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
with open(_snake_case , """w""" ) as f:
json.dump(_snake_case , _snake_case )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=True ):
"""simple docstring"""
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase = os.path.join(_snake_case , """tmp""" )
os.makedirs(_snake_case , exist_ok=_snake_case )
UpperCAmelCase = read_json(os.path.join(_snake_case , """params.json""" ) )
UpperCAmelCase = NUM_SHARDS[model_size]
UpperCAmelCase = params["""n_layers"""]
UpperCAmelCase = params["""n_heads"""]
UpperCAmelCase = n_heads // num_shards
UpperCAmelCase = params["""dim"""]
UpperCAmelCase = dim // n_heads
UpperCAmelCase = 10000.0
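# Standard RoPE inverse frequencies: theta_i = base ** (-2i / head_dim).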
UpperCAmelCase = 1.0 / (base ** (torch.arange(0 , _snake_case , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
UpperCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
UpperCAmelCase = n_heads_per_shard // num_key_value_heads
UpperCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase = n_heads
UpperCAmelCase = n_heads_per_shard
UpperCAmelCase = dim
# permute for sliced rotary
def permute(_snake_case , _snake_case=n_heads , _snake_case=dim , _snake_case=dim ):
return w.view(_snake_case , dima // n_heads // 2 , 2 , _snake_case ).transpose(1 , 2 ).reshape(_snake_case , _snake_case )
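# The permute above exists because the original checkpoints store rotary q/k
# weights with interleaved even/odd channels, while the HF LLaMA implementation
# rotates the two halves of each head, so the weight rows are reordered to match.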
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase = torch.load(os.path.join(_snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCAmelCase = [
torch.load(os.path.join(_snake_case , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" )
for i in range(_snake_case )
]
UpperCAmelCase = 0
UpperCAmelCase = {"""weight_map""": {}}
for layer_i in range(_snake_case ):
UpperCAmelCase = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
UpperCAmelCase = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) )
UpperCAmelCase = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case , )
UpperCAmelCase = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase = inv_freq
for k, v in state_dict.items():
UpperCAmelCase = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
UpperCAmelCase = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
# Write configs
UpperCAmelCase = {"""total_size""": param_count * 2}
write_json(_snake_case , os.path.join(_snake_case , """pytorch_model.bin.index.json""" ) )
UpperCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
UpperCAmelCase = LlamaConfig(
hidden_size=_snake_case , intermediate_size=compute_intermediate_size(_snake_case , _snake_case , _snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_snake_case , )
config.save_pretrained(_snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
UpperCAmelCase = LlamaForCausalLM.from_pretrained(_snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=_snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_snake_case , safe_serialization=_snake_case )
shutil.rmtree(_snake_case )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
UpperCAmelCase = tokenizer_class(_snake_case )
tokenizer.save_pretrained(_snake_case )
def _a ( ):
"""simple docstring"""
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=_snake_case , help="""Whether or not to save using `safetensors`.""" )
UpperCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
UpperCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , _snake_case )
if __name__ == "__main__":
main()
| 234 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" ,__UpperCAmelCase ,)
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
| 37 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ) -> str:
if not conversation_id:
lowerCAmelCase__ : List[str] = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase__ : List[Any] = []
if generated_responses is None:
lowerCAmelCase__ : str = []
lowerCAmelCase__ : uuid.UUID = conversation_id
lowerCAmelCase__ : List[str] = past_user_inputs
lowerCAmelCase__ : List[str] = generated_responses
lowerCAmelCase__ : Optional[str] = text
def __eq__( self ,__UpperCAmelCase ) -> Dict:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = False ) -> Optional[Any]:
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
lowerCAmelCase__ : Optional[int] = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
lowerCAmelCase__ : Optional[Any] = text
def UpperCAmelCase_ ( self ) -> List[Any]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase__ : Union[str, Any] = None
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Tuple:
self.generated_responses.append(__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[str]:
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Tuple:
lowerCAmelCase__ : Tuple = F"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
lowerCAmelCase__ : Any = """user""" if is_user else """bot"""
output += F"""{name} >> {text} \n"""
return output
@add_end_docstrings(
SCREAMING_SNAKE_CASE_ , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase__ : Tuple = self.tokenizer.eos_token
def UpperCAmelCase_ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : Optional[int] = {}
lowerCAmelCase__ : List[str] = {}
if min_length_for_response is not None:
lowerCAmelCase__ : Any = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase__ : Optional[int] = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase__ : Optional[Any] = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ : int = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase=0 ,**__UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase ,num_workers=__UpperCAmelCase ,**__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=32 ) -> Dict[str, Any]:
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
lowerCAmelCase__ : str = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase__ : List[Any] = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
lowerCAmelCase__ : List[Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase__ : Dict = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=10 ,**__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Optional[Any] = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
lowerCAmelCase__ : Optional[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
lowerCAmelCase__ : str = max_length - minimum_tokens
lowerCAmelCase__ : Union[str, Any] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase__ : Tuple = model_inputs["""attention_mask"""][:, -trim:]
lowerCAmelCase__ : str = model_inputs.pop("""conversation""" )
lowerCAmelCase__ : Union[str, Any] = max_length
lowerCAmelCase__ : Any = self.model.generate(**__UpperCAmelCase ,**__UpperCAmelCase )
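# Encoder-decoder models generate a fresh sequence (only the leading
# decoder-start token is skipped); decoder-only models echo the prompt, so the
# first n prompt tokens are stripped from the generated ids below.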
if self.model.config.is_encoder_decoder:
lowerCAmelCase__ : int = 1
else:
lowerCAmelCase__ : int = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=True ) -> List[str]:
lowerCAmelCase__ : Optional[int] = model_outputs["""output_ids"""]
lowerCAmelCase__ : Tuple = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
lowerCAmelCase__ : Union[str, Any] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Dict = self.tokenizer.eos_token_id
lowerCAmelCase__ : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
lowerCAmelCase__ : Optional[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 37 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Dict = KandinskyVaaPipeline
lowerCAmelCase_ : Any = [
"image_embeds",
"negative_image_embeds",
]
lowerCAmelCase_ : Any = ["image_embeds", "negative_image_embeds"]
lowerCAmelCase_ : Union[str, Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase_ : str = False
@property
def lowercase__ ( self : Dict ):
return 32
@property
def lowercase__ ( self : Optional[Any] ):
return 32
@property
def lowercase__ ( self : Dict ):
return self.time_input_dim
@property
def lowercase__ ( self : Any ):
return self.time_input_dim * 4
@property
def lowercase__ ( self : Union[str, Any] ):
return 100
@property
def lowercase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase : Any = {
'in_channels': 4,
# Out channels is double the in channels because the model predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCAmelCase : Dict = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def lowercase__ ( self : Tuple ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = self.dummy_unet
lowerCAmelCase : Union[str, Any] = self.dummy_movq
lowerCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCAmelCase_ , )
lowerCAmelCase : Tuple = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase__ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=0 ):
lowerCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCAmelCase : Optional[Any] = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCAmelCase : List[str] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowercase__ ( self : List[str] ):
lowerCAmelCase : Tuple = 'cpu'
lowerCAmelCase : List[Any] = self.get_dummy_components()
lowerCAmelCase : List[Any] = self.pipeline_class(**UpperCAmelCase_ )
lowerCAmelCase : Any = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(UpperCAmelCase_ ) )
lowerCAmelCase : Union[str, Any] = output.images
lowerCAmelCase : Dict = pipe(
**self.get_dummy_inputs(UpperCAmelCase_ ) , return_dict=UpperCAmelCase_ , )[0]
lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Tuple = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCAmelCase_ )
lowerCAmelCase : str = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
lowerCAmelCase : str = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCAmelCase : Any = 'red cat, 4k photo'
lowerCAmelCase : int = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase , lowerCAmelCase : Dict = pipe_prior(
UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowerCAmelCase : List[str] = torch.Generator(device='cuda' ).manual_seed(0 )
lowerCAmelCase : int = pipeline(
image_embeds=UpperCAmelCase_ , negative_image_embeds=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=100 , output_type='np' , )
lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ )
| 323 |
from __future__ import annotations
from typing import Any
class Graph:  # upstream name; attribute and method names below are restored from their call sites
    def __init__( self : Optional[Any] , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self : List[str] , u_node : int , v_node : int , weight : int ):
        """Add an undirected, weighted edge to the graph."""
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self : Dict , u_node : int ):
        """Follow parent pointers up to the representative of u_node's component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self : Optional[int] , u_node : int ):
        """Re-resolve every node's representative after a merge."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self : List[str] , component_size : list[int] , u_node : int , v_node : int ):
        """Merge the components of u_node and v_node, smaller into larger."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self : str ):
        """Boruvka's algorithm: print each edge added to the MST and the total weight."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def test_vector() -> None:  # test driver; original name was obfuscated in this dump
    """
    Exercise the Graph above via doctest (run by testmod() below).

    >>> g = Graph(4)
    >>> for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
    ...     g.add_edge(u, v, w)
    >>> g.boruvka()
    Added edge [0 - 3]
    Added weight: 5
    <BLANKLINE>
    Added edge [0 - 1]
    Added weight: 10
    <BLANKLINE>
    Added edge [2 - 3]
    Added weight: 4
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 19
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
a : Dict = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
a : List[str] = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
a : Optional[Any] = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
a : Optional[Any] = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
a : List[Any] = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
a : int = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def strabool(v) -> bool:  # name as used at the call site below
    '''Parse a truthy/falsy command-line string into a bool.'''
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('''boolean value expected''' )
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False) -> dict:
    '''Copy one resnet block's tensors from `checkpoint` into `new_checkpoint` (names restored from the call sites below).'''
snake_case_ = checkpoint[F"{old_prefix}.in_layers.0.weight"]
snake_case_ = checkpoint[F"{old_prefix}.in_layers.0.bias"]
snake_case_ = checkpoint[F"{old_prefix}.in_layers.2.weight"]
snake_case_ = checkpoint[F"{old_prefix}.in_layers.2.bias"]
snake_case_ = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
snake_case_ = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
snake_case_ = checkpoint[F"{old_prefix}.out_layers.0.weight"]
snake_case_ = checkpoint[F"{old_prefix}.out_layers.0.bias"]
snake_case_ = checkpoint[F"{old_prefix}.out_layers.3.weight"]
snake_case_ = checkpoint[F"{old_prefix}.out_layers.3.bias"]
if has_skip:
snake_case_ = checkpoint[F"{old_prefix}.skip_connection.weight"]
snake_case_ = checkpoint[F"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None) -> dict:
    '''Copy one attention block's tensors, splitting the fused qkv projection (last parameter name reconstructed; it is unused in the surviving body).'''
    weight_q, weight_k, weight_v = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3, dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3, dim=0 )
snake_case_ = checkpoint[F"{old_prefix}.norm.weight"]
snake_case_ = checkpoint[F"{old_prefix}.norm.bias"]
snake_case_ = weight_q.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_q.squeeze(-1 ).squeeze(-1 )
snake_case_ = weight_k.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_k.squeeze(-1 ).squeeze(-1 )
snake_case_ = weight_v.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_v.squeeze(-1 ).squeeze(-1 )
snake_case_ = (
checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
snake_case_ = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config) -> dict:
    '''Translate an original consistency-model checkpoint into the diffusers UNet layout.'''
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''' )
    new_checkpoint = {}
snake_case_ = checkpoint['''time_embed.0.weight''']
snake_case_ = checkpoint['''time_embed.0.bias''']
snake_case_ = checkpoint['''time_embed.2.weight''']
snake_case_ = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
snake_case_ = checkpoint['''label_emb.weight''']
snake_case_ = checkpoint['''input_blocks.0.0.weight''']
snake_case_ = checkpoint['''input_blocks.0.0.bias''']
snake_case_ = unet_config['''down_block_types''']
snake_case_ = unet_config['''layers_per_block''']
snake_case_ = unet_config['''attention_head_dim''']
snake_case_ = unet_config['''block_out_channels''']
snake_case_ = 1
snake_case_ = channels_list[0]
for i, layer_type in enumerate(__UpperCAmelCase ):
snake_case_ = channels_list[i]
snake_case_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__UpperCAmelCase ):
snake_case_ = F"down_blocks.{i}.resnets.{j}"
snake_case_ = F"input_blocks.{current_layer}.0"
snake_case_ = True if j == 0 and downsample_block_has_skip else False
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, has_skip=__UpperCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__UpperCAmelCase ):
snake_case_ = F"down_blocks.{i}.resnets.{j}"
snake_case_ = F"input_blocks.{current_layer}.0"
snake_case_ = True if j == 0 and downsample_block_has_skip else False
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, has_skip=__UpperCAmelCase )
snake_case_ = F"down_blocks.{i}.attentions.{j}"
snake_case_ = F"input_blocks.{current_layer}.1"
snake_case_ = convert_attention(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
current_layer += 1
if i != len(__UpperCAmelCase ) - 1:
snake_case_ = F"down_blocks.{i}.downsamplers.0"
snake_case_ = F"input_blocks.{current_layer}.0"
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
current_layer += 1
snake_case_ = current_channels
# hardcoded the mid-block for now
snake_case_ = '''mid_block.resnets.0'''
snake_case_ = '''middle_block.0'''
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
snake_case_ = '''mid_block.attentions.0'''
snake_case_ = '''middle_block.1'''
snake_case_ = convert_attention(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
snake_case_ = '''mid_block.resnets.1'''
snake_case_ = '''middle_block.2'''
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
snake_case_ = 0
snake_case_ = unet_config['''up_block_types''']
for i, layer_type in enumerate(__UpperCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ = F"up_blocks.{i}.resnets.{j}"
snake_case_ = F"output_blocks.{current_layer}.0"
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, has_skip=__UpperCAmelCase )
current_layer += 1
if i != len(__UpperCAmelCase ) - 1:
snake_case_ = F"up_blocks.{i}.upsamplers.0"
snake_case_ = F"output_blocks.{current_layer-1}.1"
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ = F"up_blocks.{i}.resnets.{j}"
snake_case_ = F"output_blocks.{current_layer}.0"
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, has_skip=__UpperCAmelCase )
snake_case_ = F"up_blocks.{i}.attentions.{j}"
snake_case_ = F"output_blocks.{current_layer}.1"
snake_case_ = convert_attention(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
current_layer += 1
if i != len(__UpperCAmelCase ) - 1:
snake_case_ = F"up_blocks.{i}.upsamplers.0"
snake_case_ = F"output_blocks.{current_layer-1}.2"
snake_case_ = convert_resnet(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
snake_case_ = checkpoint['''out.0.weight''']
snake_case_ = checkpoint['''out.0.bias''']
snake_case_ = checkpoint['''out.2.weight''']
snake_case_ = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
a : Any = parser.parse_args()
a : List[Any] = strabool(args.class_cond)
a : Any = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
a : str = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a : List[str] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a : Optional[int] = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
a : List[Any] = None
a : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
a : Tuple = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a : List[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a : Union[str, Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
a : Dict = CMStochasticIterativeScheduler(**scheduler_config)
a : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
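    # Added sanity check (not in the original script): reload the converted
    # pipeline from --dump_path and draw a one-step sample. `class_labels` only
    # applies to the class-conditional checkpoints.
    reloaded_pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    sample_image = reloaded_pipe(
        num_inference_steps=1, class_labels=0 if args.class_cond else None
    ).images[0]
    sample_image.save(os.path.join(args.dump_path, "sample.png"))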
| 56 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : str = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class a ( _lowerCamelCase ):
snake_case_ = "big_bird"
def __init__( self : Union[str, Any] , lowercase_ : List[Any]=5_0358 , lowercase_ : Tuple=768 , lowercase_ : Dict=12 , lowercase_ : str=12 , lowercase_ : Tuple=3072 , lowercase_ : Any="gelu_new" , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=4096 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=1e-12 , lowercase_ : Tuple=True , lowercase_ : Tuple=0 , lowercase_ : str=1 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=66 , lowercase_ : Optional[int]="block_sparse" , lowercase_ : Any=True , lowercase_ : List[str]=False , lowercase_ : Any=64 , lowercase_ : Tuple=3 , lowercase_ : Tuple=None , **lowercase_ : Tuple , ):
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , sep_token_id=lowercase_ , **lowercase_ , )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = rescale_embeddings
snake_case_ = attention_type
snake_case_ = use_bias
snake_case_ = block_size
snake_case_ = num_random_blocks
snake_case_ = classifier_dropout
class a ( _lowerCamelCase ):
@property
def A_ ( self : str ):
if self.task == "multiple-choice":
snake_case_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
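# Added usage sketch: upstream these classes are BigBirdConfig and
# BigBirdOnnxConfig; with the public transformers API the configuration above
# is used like so:
#
#   from transformers import BigBirdConfig, BigBirdModel
#
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   model = BigBirdModel(config)  # randomly initialized; block-sparse attention
#                                 # needs sequence lengths well beyond block_size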
| 56 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCamelCase : Dict = logging.getLogger(__name__)
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """summarization"""
UpperCamelCase_ = ["""loss"""]
UpperCamelCase_ = ROUGE_KEYS
UpperCamelCase_ = """rouge2"""
def __init__( self : Dict , UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ):
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE : List[str] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE : Optional[Any] = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE : Tuple = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.config.model_type
SCREAMING_SNAKE_CASE : int = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE : Any = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE : Any = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE : Optional[Any] = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE : str = hparams.num_workers
SCREAMING_SNAKE_CASE : int = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE : Dict = self.decoder_start_token_id
SCREAMING_SNAKE_CASE : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.config.max_length
SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __A ( self : int , UpperCamelCase__ : Dict[str, torch.Tensor] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE : str = True
return readable_batch
def __A ( self : int , UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : List[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def __A ( self : Union[str, Any] , UpperCamelCase__ : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE : List[Any] = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE : Dict = batch['''labels''']
if isinstance(self.model , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : int = self.model._shift_right(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE : int = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : List[str] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
SCREAMING_SNAKE_CASE : List[Any] = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return self.tokenizer.pad_token_id
def __A ( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self._step(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
SCREAMING_SNAKE_CASE : List[str] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE : List[Any] = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE : Optional[int] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __A ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ):
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any="val" ):
'''simple docstring'''
self.step_count += 1
SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE : List[Any] = losses['''loss''']
SCREAMING_SNAKE_CASE : Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE : List[str] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE : str = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def __A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE : int = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE : List[str] = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE : str = self._step(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(self.loss_names , UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def __A ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix='''test''' )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.n_obs[type_path]
SCREAMING_SNAKE_CASE : List[Any] = self.target_lens[type_path]
SCREAMING_SNAKE_CASE : Optional[int] = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def __A ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE : Dict = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def __A ( self : int ):
'''simple docstring'''
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def __A ( self : int ):
'''simple docstring'''
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __A ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=UpperCamelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=UpperCamelCase__ )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=UpperCamelCase__ )
parser.add_argument('''--max_tokens_per_batch''' , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument('''--logger_name''' , type=UpperCamelCase__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=UpperCamelCase__ , default='''summarization''' , required=UpperCamelCase__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
parser.add_argument('''--src_lang''' , type=UpperCamelCase__ , default='''''' , required=UpperCamelCase__ )
parser.add_argument('''--tgt_lang''' , type=UpperCamelCase__ , default='''''' , required=UpperCamelCase__ )
parser.add_argument('''--eval_beams''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
parser.add_argument(
'''--val_metric''' , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """translation"""
UpperCamelCase_ = ["""loss"""]
UpperCamelCase_ = ["""bleu"""]
UpperCamelCase_ = """bleu"""
def __init__( self : List[Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : str ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = hparams.src_lang
SCREAMING_SNAKE_CASE : Any = hparams.tgt_lang
def __A ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
def A ( _lowercase , _lowercase=None ):
Path(args.output_dir ).mkdir(exist_ok=_lowercase )
check_output_dir(_lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE : SummarizationModule = SummarizationModule(_lowercase )
else:
SCREAMING_SNAKE_CASE : SummarizationModule = TranslationModule(_lowercase )
SCREAMING_SNAKE_CASE : int = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
SCREAMING_SNAKE_CASE : Optional[int] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE : List[Any] = os.environ.get('''WANDB_PROJECT''' , _lowercase )
SCREAMING_SNAKE_CASE : List[str] = WandbLogger(name=model.output_dir.name , project=_lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE : str = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = args.val_metric == '''loss'''
SCREAMING_SNAKE_CASE : pl.Trainer = generic_train(
_lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE : int = ''''''
SCREAMING_SNAKE_CASE : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_lowercase ) )
if checkpoints:
SCREAMING_SNAKE_CASE : List[Any] = checkpoints[-1]
SCREAMING_SNAKE_CASE : Union[str, Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
__UpperCamelCase : List[str] = pl.Trainer.add_argparse_args(parser)
__UpperCamelCase : Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCamelCase : Any = parser.parse_args()
main(args)
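    # Added note: an illustrative invocation. The flag names come from the
    # argparse setup above; --model_name_or_path is assumed to come from the
    # shared lightning_base/BaseTransformer args, and the paths are placeholders:
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
    #       --model_name_or_path t5-small --gpus 1 --do_predict \
    #       --n_val 500 --val_metric rouge2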
| 351 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = '▁'
__UpperCamelCase : str = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCamelCase : Tuple = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
__UpperCamelCase : List[str] = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__UpperCamelCase : Dict = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ = []
UpperCamelCase_ = []
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : Union[str, Any]="</s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : List[Any]="<s>" , UpperCamelCase__ : List[str]="<unk>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE : str = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE : Tuple = 1
SCREAMING_SNAKE_CASE : Dict = len(self.sp_model )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__ )
}
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE : int = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE : List[str] = src_lang if src_lang is not None else '''en_XX'''
SCREAMING_SNAKE_CASE : Any = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : Tuple = {}
SCREAMING_SNAKE_CASE : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __A ( self : int ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __A ( self : Tuple , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
SCREAMING_SNAKE_CASE : Dict = src_lang
SCREAMING_SNAKE_CASE : Union[str, Any] = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = self.convert_tokens_to_ids(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = tgt_lang_id
return inputs
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : Any , UpperCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __A ( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __A ( self : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __A ( self : int , UpperCamelCase__ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __A ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def __A ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = src_lang
SCREAMING_SNAKE_CASE : List[str] = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self : List[str] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self : str , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE : Tuple = []
SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
def __A ( self : List[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.lang_code_to_id[lang]
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = [self.eos_token_id, self.cur_lang_code]
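# Added usage sketch (upstream this class is MBartTokenizer; the entry points
# mirror the methods above):
#
#   from transformers import MBartTokenizer
#
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
#   # source sequences end with </s> followed by the source language code:
#   tok.convert_ids_to_tokens(batch["input_ids"][0].tolist())[-2:]  # ['</s>', 'en_XX']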
| 258 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a registered alias (e.g. "np") to its main format type (e.g. "numpy")."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type` (aliases are resolved first)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'"
        )
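# Added usage sketch: this registry backs the public `datasets` formatting API,
# reached through Dataset.set_format / with_format. Standalone example (assumes
# `datasets` and `numpy` are installed):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [1, 2, 3]}).with_format("np")  # "np" -> "numpy"
#   type(ds[:]["x"])  # -> numpy.ndarray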
| 95 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:  # original name reconstructed
    '''Doolittle LU decomposition of a square matrix into lower and upper triangular factors.'''
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            F'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
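    # Added demo of the routine above: factor a small square matrix and check
    # that the product of the factors reconstructs it.
    demo = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    demo_lower, demo_upper = lower_upper_decomposition(demo)
    assert np.allclose(demo_lower @ demo_upper, demo)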
| 285 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
return (data["data"], data["target"])
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : str = XGBClassifier()
classifier.fit(UpperCamelCase__ , UpperCamelCase__ )
return classifier
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Optional[Any] = load_iris()
_a , _a : Dict = data_handling(UpperCamelCase__ )
_a , _a , _a , _a : List[Any] = train_test_split(
UpperCamelCase__ , UpperCamelCase__ , test_size=0.25 )
_a : List[Any] = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
_a : List[str] = xgboost(UpperCamelCase__ , UpperCamelCase__ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , display_labels=UpperCamelCase__ , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
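    # Added note: main() above only plots the confusion matrix; to also report
    # held-out accuracy one could extend it with, e.g. (names illustrative):
    #   predictions = classifier.predict(x_test)
    #   print("accuracy:", (predictions == y_test).mean())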
| 324 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase ( self : Dict ) -> Dict:
_a : str = self.dummy_uncond_unet
_a : Optional[int] = KarrasVeScheduler()
_a : List[str] = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = torch.manual_seed(0 )
_a : List[Any] = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : Tuple = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" , return_dict=UpperCAmelCase__ )[0]
_a : int = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : Optional[Any] = """google/ncsnpp-celebahq-256"""
_a : Any = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_a : Dict = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = torch.manual_seed(0 )
_a : Tuple = pipe(num_inference_steps=20 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 324 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_a = 'SpeechT5FeatureExtractor'
_a = 'SpeechT5Tokenizer'
def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ):
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self : Dict , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Union[str, Any] ):
lowerCAmelCase = kwargs.pop("""audio""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""text""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""text_target""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""audio_target""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""sampling_rate""" , SCREAMING_SNAKE_CASE_ )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
lowerCAmelCase = self.feature_extractor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
elif text is not None:
lowerCAmelCase = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase = None
if audio_target is not None:
lowerCAmelCase = self.feature_extractor(audio_target=SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = targets['input_values']
elif text_target is not None:
lowerCAmelCase = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = targets['input_ids']
else:
lowerCAmelCase = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase = labels
lowerCAmelCase = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase = decoder_attention_mask
return inputs
def __lowercase ( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : Optional[int] ):
lowerCAmelCase = kwargs.pop("""input_values""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""input_ids""" , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = kwargs.pop("""labels""" , SCREAMING_SNAKE_CASE_ )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
lowerCAmelCase = self.feature_extractor.pad(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
elif input_ids is not None:
lowerCAmelCase = self.tokenizer.pad(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase = None
if labels is not None:
if "input_ids" in labels or (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and "input_ids" in labels[0]):
lowerCAmelCase = self.tokenizer.pad(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = targets['input_ids']
else:
lowerCAmelCase = self.feature_extractor.feature_size
lowerCAmelCase = self.feature_extractor.num_mel_bins
lowerCAmelCase = self.feature_extractor.pad(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase = feature_size_hack
lowerCAmelCase = targets['input_values']
else:
lowerCAmelCase = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase = labels
lowerCAmelCase = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase = decoder_attention_mask
return inputs
def __lowercase ( self : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[Any] ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : List[str] ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
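# Illustrative call pattern (a sketch: the class above mirrors transformers'
# SpeechT5Processor; the checkpoint id is an assumption and needs a download):
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("""microsoft/speecht5_tts""" )
#     inputs = processor(text="""Hello world""" , return_tensors="""pt""" )   # tokenizer branch of __call__
#     # passing both `audio` and `text` raises ValueError, per the guard above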
| 155 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
"""This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."""
"""It takes two arguments named `image` which should be the original image, and `label` which should be a text """
"""describing the elements what should be identified in the segmentation mask. The tool returns the mask."""
)
_SCREAMING_SNAKE_CASE = """CIDAS/clipseg-rd64-refined"""
_SCREAMING_SNAKE_CASE = """image_segmenter"""
_SCREAMING_SNAKE_CASE = CLIPSegForImageSegmentation
_SCREAMING_SNAKE_CASE = ["""image""", """text"""]
_SCREAMING_SNAKE_CASE = ["""image"""]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
requires_backends(self , ['vision'] )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : "Image" , SCREAMING_SNAKE_CASE_ : str ):
return self.pre_processor(text=[label] , images=[image] , padding=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ):
with torch.no_grad():
lowerCAmelCase_ : List[str] = self.model(**SCREAMING_SNAKE_CASE_ ).logits
return logits
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Dict = outputs.cpu().detach().numpy()
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Optional[Any] = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
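# Hypothetical end-to-end use (a sketch; `photo.png` is a placeholder and the
# one-call form relies on PipelineTool.__call__ chaining the encode ->
# forward -> decode stages defined above, not on this file's mangled names):
#
#     from PIL import Image
#     tool = <the tool class above>()
#     mask = tool(Image.open("""photo.png""" ) , """cat""" )   # returns a PIL mask image
#     mask.save("""cat_mask.png""" )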
| 224 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class _snake_case ( logging.LoggerAdapter ):
@staticmethod
def lowerCAmelCase__ ( a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowerCAmelCase__ ( self , a__ , a__ , *a__ , **a__ ) -> List[Any]:
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
snake_case_ = kwargs.pop("main_process_only" , a__ )
snake_case_ = kwargs.pop("in_order" , a__ )
if self.isEnabledFor(a__ ):
if self._should_log(a__ ):
snake_case_ , snake_case_ = self.process(a__ , a__ )
self.logger.log(a__ , a__ , *a__ , **a__ )
elif in_order:
snake_case_ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
snake_case_ , snake_case_ = self.process(a__ , a__ )
self.logger.log(a__ , a__ , *a__ , **a__ )
state.wait_for_everyone()
def UpperCamelCase_( snake_case : str , snake_case : str = None ):
'''simple docstring'''
if log_level is None:
snake_case_ = os.environ.get("ACCELERATE_LOG_LEVEL" , snake_case )
snake_case_ = logging.getLogger(snake_case )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(snake_case , {} )
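# Example use (a sketch following accelerate's documented logging pattern; it
# assumes the factory above plays the role of accelerate.logging.get_logger and
# that an Accelerator/PartialState has been constructed first):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()                  # initializes PartialState
#     logger = get_logger(__name__ , log_level="""INFO""" )
#     logger.info("""printed once, on the main process""" )
#     logger.info("""printed by every rank, in order""" , main_process_only=False , in_order=True )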
| 92 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class _snake_case :
def __init__( self , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
snake_case_ = len(a__ ) - 1
def lowerCAmelCase__ ( self , a__ ) -> list[float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , a__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(a__ ) , 5 ) == 1
return output_values
def lowerCAmelCase__ ( self , a__ ) -> tuple[float, float]:
'''simple docstring'''
assert 0 <= t <= 1, "Time t must be between 0 and 1."
snake_case_ = self.basis_function(a__ )
snake_case_ = 0.0
snake_case_ = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def lowerCAmelCase__ ( self , a__ = 0.0_1 ) -> Any:
'''simple docstring'''
from matplotlib import pyplot as plt # type: ignore
snake_case_ = [] # x coordinates of points to plot
snake_case_ = [] # y coordinates of points to plot
snake_case_ = 0.0
while t <= 1:
snake_case_ = self.bezier_curve_function(a__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
snake_case_ = [i[0] for i in self.list_of_points]
snake_case_ = [i[1] for i in self.list_of_points]
plt.plot(
a__ , a__ , color="blue" , label="Curve of Degree " + str(self.degree ) , )
plt.scatter(a__ , a__ , color="red" , label="Control Points" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
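    # For reference, basis_function evaluates the Bernstein basis (standard
    # definition, stated here for clarity rather than taken from this file):
    #     B_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i),  with sum_i B_{i,n}(t) = 1
    # and bezier_curve_function(t) is the convex combination sum_i B_{i,n}(t) * P_i.
    # A degree-1 curve at t = 0.5 is therefore the segment midpoint:
    #     BezierCurve([(0, 0), (2, 2)]).bezier_curve_function(0.5)  # -> (1.0, 1.0)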
| 92 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__: Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__: str = """
 Examples:
 ```py
 >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
 >>> import torch

 >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
 >>> pipe_prior.to(\"cuda\")
 >>> prompt = \"red cat, 4k photo\"
 >>> out = pipe_prior(prompt)
 >>> image_emb = out.image_embeds
 >>> zero_image_emb = out.negative_image_embeds
 >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
 >>> pipe.to(\"cuda\")
 >>> image = pipe(
 ...     image_embeds=image_emb,
 ...     negative_image_embeds=zero_image_emb,
 ...     height=768,
 ...     width=768,
 ...     num_inference_steps=50,
 ... ).images
 >>> image[0].save(\"cat.png\")
 ```
"""
def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any]=8 ) -> Any:
UpperCAmelCase : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : UNetaDConditionModel , __snake_case : DDPMScheduler , __snake_case : VQModel , ) -> Dict:
super().__init__()
self.register_modules(
unet=__snake_case , scheduler=__snake_case , movq=__snake_case , )
UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A ( self : Tuple , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Any , __snake_case : int , __snake_case : Optional[Any] , __snake_case : int ) -> Optional[int]:
if latents is None:
UpperCAmelCase : Optional[int] = randn_tensor(__snake_case , generator=__snake_case , device=__snake_case , dtype=__snake_case )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase : List[Any] = latents.to(__snake_case )
UpperCAmelCase : Optional[int] = latents * scheduler.init_noise_sigma
return latents
def A ( self : Dict , __snake_case : Dict=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase : Tuple = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__snake_case , __snake_case )
def A ( self : List[str] , __snake_case : Tuple=0 ) -> List[Any]:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCAmelCase : Dict = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=__snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase , UpperCAmelCase : Any = cpu_offload_with_hook(__snake_case , __snake_case , prev_module_hook=__snake_case )
# We'll offload the last model manually.
UpperCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A ( self : List[str] ) -> Dict:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__snake_case , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__snake_case )
def __call__( self : Tuple , __snake_case : Union[torch.FloatTensor, List[torch.FloatTensor]] , __snake_case : Union[torch.FloatTensor, List[torch.FloatTensor]] , __snake_case : int = 512 , __snake_case : int = 512 , __snake_case : int = 100 , __snake_case : float = 4.0 , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , ) -> str:
UpperCAmelCase : int = self._execution_device
UpperCAmelCase : Optional[int] = guidance_scale > 1.0
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase : Union[str, Any] = torch.cat(__snake_case , dim=0 )
UpperCAmelCase : str = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase : Tuple = torch.cat(__snake_case , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase : Optional[Any] = image_embeds.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase : int = negative_image_embeds.repeat_interleave(__snake_case , dim=0 )
UpperCAmelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__snake_case )
self.scheduler.set_timesteps(__snake_case , device=__snake_case )
UpperCAmelCase : Optional[int] = self.scheduler.timesteps
UpperCAmelCase : Dict = self.unet.config.in_channels
UpperCAmelCase , UpperCAmelCase : str = downscale_height_and_width(__snake_case , __snake_case , self.movq_scale_factor )
# create initial latent
UpperCAmelCase : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __snake_case , __snake_case , __snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase : str = {'''image_embeds''': image_embeds}
UpperCAmelCase : Tuple = self.unet(
sample=__snake_case , timestep=__snake_case , encoder_hidden_states=__snake_case , added_cond_kwargs=__snake_case , return_dict=__snake_case , )[0]
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase , UpperCAmelCase : str = noise_pred.chunk(2 )
UpperCAmelCase , UpperCAmelCase : int = variance_pred.chunk(2 )
UpperCAmelCase : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase , UpperCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Optional[int] = self.scheduler.step(
__snake_case , __snake_case , __snake_case , generator=__snake_case , )[0]
# post-processing
UpperCAmelCase : Optional[int] = self.movq.decode(__snake_case , force_not_quantize=__snake_case )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase : List[Any] = image * 0.5 + 0.5
UpperCAmelCase : int = image.clamp(0 , 1 )
UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : Optional[Any] = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
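# Worked example for the downscale helper defined at the top of this file
# (intended behavior, traced from its logic): a 768x768 request with scale
# factor 8 gives 768 // 8**2 == 12 with no remainder, so it returns
# (12 * 8, 12 * 8) == (96, 96); in general each dimension becomes
# ceil(dim / scale_factor**2) * scale_factor, i.e. the movq latent grid size.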
| 23 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
    if n == 1 or not isinstance(__UpperCAmelCase , int ):
return 0
elif n == 2:
return 1
else:
lowercase__: List[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
lowercase__: Union[str, Any] = 0
lowercase__: List[Any] = 2
while digits < n:
index += 1
lowercase__: Dict = len(str(fibonacci(__UpperCAmelCase ) ) )
return index
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 1_0_0_0 ) -> int:
return fibonacci_digits_index(__UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
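# Cross-check via Binet's formula (a standard identity, added for illustration;
# the helper below is not part of the original file):
#     F_n ~ phi**n / sqrt(5)  =>  digits(F_n) = floor(n*log10(phi) - log10(sqrt(5))) + 1
from math import log10, sqrt

def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + sqrt(5)) / 2
    index = 2
    while int(index * log10(phi) - log10(sqrt(5))) + 1 < n:
        index += 1
    return index

# Both routes agree on the example above: the first Fibonacci number with 1000
# digits is F_4782 (Project Euler #25), so solution(1_0_0_0) should return 4782.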
| 177 | 0 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
lowercase__ : List[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
lowercase__ : str = logging.WARNING
def a__ ( ) -> Dict:
"""simple docstring"""
_UpperCamelCase = os.getenv('''DATASETS_VERBOSITY''', UpperCamelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def a__ ( ) -> str:
"""simple docstring"""
return __name__.split('''.''' )[0]
def a__ ( ) -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def a__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def a__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def a__ ( lowercase : Optional[str] = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
_UpperCamelCase = _get_library_name()
return logging.getLogger(UpperCamelCase__ )
def a__ ( ) -> int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def a__ ( lowercase : int ) -> None:
"""simple docstring"""
_get_library_root_logger().setLevel(UpperCamelCase__ )
def a__ ( ) -> List[Any]:
"""simple docstring"""
return set_verbosity(UpperCamelCase__ )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
return set_verbosity(UpperCamelCase__ )
def a__ ( ) -> Dict:
"""simple docstring"""
return set_verbosity(UpperCamelCase__ )
def a__ ( ) -> Any:
"""simple docstring"""
return set_verbosity(UpperCamelCase__ )
def a__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = False
def a__ ( ) -> None:
"""simple docstring"""
_UpperCamelCase = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , *lowerCAmelCase__ : Dict , **lowerCAmelCase__ : List[str] ) -> Optional[Any]: # pylint: disable=unused-argument
'''simple docstring'''
_UpperCamelCase = args[0] if args else None
def __iter__( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : Dict , lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
def empty_fn(*lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ) -> List[Any]:
'''simple docstring'''
return self
def __exit__( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
return
lowercase__ : Dict = True
class __lowerCAmelCase :
"""simple docstring"""
def __call__( self : str , *lowerCAmelCase__ : str , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : Dict ) -> int:
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*__lowerCamelCase , **__lowerCamelCase )
else:
return EmptyTqdm(*__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : Optional[Any] , *lowerCAmelCase__ : str , **lowerCAmelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCamelCase , **__lowerCamelCase )
def snake_case__ ( self : str ) -> int:
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
lowercase__ : str = _tqdm_cls()
def a__ ( ) -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
global _tqdm_active
_UpperCamelCase = True
def a__ ( ) -> int:
"""simple docstring"""
global _tqdm_active
_UpperCamelCase = False
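# Typical use of the helpers above (a sketch; the mangled `a__` definitions are
# assumed to correspond to datasets.logging's public get_logger / set_verbosity /
# enable_progress_bar / disable_progress_bar API):
#
#     import logging as py_logging
#     set_verbosity(py_logging.INFO )        # or the set_verbosity_* shorthands above
#     logger = get_logger(__name__ )
#     logger.info("""visible at INFO verbosity""" )
#     disable_progress_bar()                 # flips the module-level _tqdm_active flag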
| 367 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def a__ ( lowercase : Tuple ) -> Dict:
"""simple docstring"""
_UpperCamelCase = int(lowercase )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = t // 3600, (t // 60) % 60, t % 60
return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""
def a__ ( lowercase : List[Any], lowercase : Dict, lowercase : Optional[int], lowercase : Union[str, Any], lowercase : Any=300 ) -> Any:
"""simple docstring"""
return F"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def a__ ( lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCamelCase = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
_UpperCamelCase = F"""{elt:.6f}""" if isinstance(lowercase, lowercase ) else str(lowercase )
html_code += F""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : str = 5
_snake_case : Optional[int] = 0.2
def __init__( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase__ : int = 300 , ) -> int:
'''simple docstring'''
_UpperCamelCase = total
_UpperCamelCase = '''''' if prefix is None else prefix
_UpperCamelCase = leave
_UpperCamelCase = parent
_UpperCamelCase = width
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : str = None ) -> Dict:
'''simple docstring'''
_UpperCamelCase = value
if comment is not None:
_UpperCamelCase = comment
if self.last_value is None:
_UpperCamelCase = _UpperCamelCase = time.time()
_UpperCamelCase = _UpperCamelCase = value
_UpperCamelCase = _UpperCamelCase = None
_UpperCamelCase = self.warmup
_UpperCamelCase = 1
self.update_bar(lowerCAmelCase__ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_UpperCamelCase = time.time()
_UpperCamelCase = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_UpperCamelCase = self.elapsed_time / (value - self.start_value)
else:
_UpperCamelCase = None
if value >= self.total:
_UpperCamelCase = self.total
_UpperCamelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_UpperCamelCase = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase__ )
_UpperCamelCase = value
_UpperCamelCase = current_time
if self.average_time_per_item is None:
_UpperCamelCase = 1
else:
_UpperCamelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
def snake_case__ ( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple=None ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = ''' ''' * (len(str(self.total ) ) - len(str(lowerCAmelCase__ ) )) + str(lowerCAmelCase__ )
if self.elapsed_time is None:
_UpperCamelCase = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
_UpperCamelCase = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
_UpperCamelCase = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def snake_case__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Tuple ) -> Any:
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = None if column_names is None else [column_names]
_UpperCamelCase = None
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_UpperCamelCase = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase__ )
else:
self.output.update(disp.HTML(self.html_code ) )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
if self.inner_table is None:
_UpperCamelCase = [list(values.keys() ), list(values.values() )]
else:
_UpperCamelCase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase__ )
_UpperCamelCase = columns
self.inner_table.append([values[c] for c in columns] )
def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : List[str]=300 ) -> int:
'''simple docstring'''
_UpperCamelCase = NotebookProgressBar(lowerCAmelCase__ , prefix=lowerCAmelCase__ , parent=self , width=lowerCAmelCase__ )
return self.child_bar
def snake_case__ ( self : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = None
self.display()
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = False
def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , **lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
_UpperCamelCase = NotebookTrainingTracker(state.max_steps , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
_UpperCamelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
_UpperCamelCase = False
def snake_case__ ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
if not has_length(lowerCAmelCase__ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_UpperCamelCase = self.training_tracker.add_child(len(lowerCAmelCase__ ) )
else:
_UpperCamelCase = NotebookProgressBar(len(lowerCAmelCase__ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , **lowerCAmelCase__ : Any ) -> Optional[int]:
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_UpperCamelCase = None
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_UpperCamelCase = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
_UpperCamelCase = state.global_step
self.training_tracker.write_line(lowerCAmelCase__ )
def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
if self.training_tracker is not None:
_UpperCamelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
_UpperCamelCase = log['''loss''']
break
if self.first_column == "Epoch":
_UpperCamelCase = int(state.epoch )
else:
_UpperCamelCase = state.global_step
_UpperCamelCase = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
_UpperCamelCase = re.sub(r'''\_loss$''' , '''''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop('''total_flos''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop('''epoch''' , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_runtime""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , lowerCAmelCase__ )
_UpperCamelCase = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , lowerCAmelCase__ )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
_UpperCamelCase = v
else:
_UpperCamelCase = k.split('''_''' )
_UpperCamelCase = ''' '''.join([part.capitalize() for part in splits[1:]] )
_UpperCamelCase = v
self.training_tracker.write_line(lowerCAmelCase__ )
self.training_tracker.remove_child()
_UpperCamelCase = None
# Evaluation takes a long time so we should force the next update.
_UpperCamelCase = True
def snake_case__ ( self : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , **lowerCAmelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=lowerCAmelCase__ )
_UpperCamelCase = None
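# Quick check of the time formatter at the top of this file (the def is mangled
# to `a__`, but call sites in this file refer to it as format_time):
#     format_time(75)     -> "01:15"     (h == 0 branch)
#     format_time(3_725)  -> "1:02:05"   (h != 0 branch)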
| 287 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
snake_case__ : Optional[Any] = logging.getLogger(__name__)
def _snake_case ( ):
lowerCAmelCase : Tuple = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=_snake_case , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=_snake_case , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=_snake_case , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=_snake_case , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=_snake_case , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=_snake_case , type=_snake_case , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=_snake_case , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=_snake_case , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def tokenize_function( tokenizer : Optional[Any] ):
    def fn(examples : Optional[int] ):
        return tokenizer(examples['''text'''] )
    return fn
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : Optional[Any] = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
lowerCAmelCase : Optional[int] = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
lowerCAmelCase : Optional[Any] = tf.train.Features(feature=_snake_case )
lowerCAmelCase : int = tf.train.Example(features=_snake_case )
lowerCAmelCase : Dict = example.SerializeToString()
records.append(_snake_case )
return records
def _snake_case ( _snake_case : List[Any] ):
lowerCAmelCase : str = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase : Dict = min(len(_snake_case ) , args.limit )
lowerCAmelCase : List[Any] = dataset.select(range(_snake_case ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase : Union[str, Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(_snake_case ):
os.makedirs(_snake_case )
else:
lowerCAmelCase : int = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase : Optional[int] = tokenize_function(_snake_case )
lowerCAmelCase : Tuple = dataset.map(_snake_case , batched=_snake_case , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(_snake_case : Tuple ):
# Concatenate all texts.
lowerCAmelCase : int = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase : Optional[Any] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase : Any = {
k: [t[i : i + args.max_length] for i in range(0 , _snake_case , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowerCAmelCase : Tuple = dataset_tokenized.map(_snake_case , batched=_snake_case , batch_size=1000 , num_proc=4 )
lowerCAmelCase : str = 0
lowerCAmelCase : Any = 0
for shard in range(0 , len(_snake_case ) , args.shard_size ):
lowerCAmelCase : Dict = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase : List[str] = len(dataset_snapshot['''input_ids'''] )
lowerCAmelCase : List[str] = os.path.join(_snake_case , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
lowerCAmelCase : Union[str, Any] = get_serialized_examples(_snake_case )
with tf.io.TFRecordWriter(_snake_case ) as out_file:
for i in range(len(_snake_case ) ):
lowerCAmelCase : Dict = serialized_examples[i]
out_file.write(_snake_case )
print('''Wrote file {} containing {} records'''.format(_snake_case , _snake_case ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=_snake_case )
if __name__ == "__main__":
snake_case__ : str = parse_args()
main(args)
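# Example invocation (flags match the argparse definitions above; the script
# filename is a placeholder):
#
#     python prepare_tfrecord_shards.py \
#         --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#         --split train --shard_size 1000 --max_length 512 --output_dir tf-tpu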
| 60 | """simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> list:
if len(UpperCAmelCase ) <= 1:
return [tuple(UpperCAmelCase )]
snake_case_ = []
def generate(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = [0] * n
res.append(tuple(UpperCAmelCase ) )
snake_case_ = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
snake_case_ , snake_case_ = arr[i], arr[0]
else:
snake_case_ , snake_case_ = arr[i], arr[c[i]]
res.append(tuple(UpperCAmelCase ) )
c[i] += 1
snake_case_ = 0
else:
snake_case_ = 0
i += 1
generate(len(UpperCAmelCase ) , UpperCAmelCase )
return res
if __name__ == "__main__":
__UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
__UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
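# A compact, self-contained sketch of the same algorithm (standard recursive
# Heap's permutations, added for illustration and independent of the
# placeholder names used above):
def heap_permutations(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int) -> None:
        if k == 1:
            res.append(tuple(arr))
            return
        for j in range(k - 1):
            generate(k - 1)
            if k % 2 == 0:
                arr[j], arr[k - 1] = arr[k - 1], arr[j]
            else:
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
        generate(k - 1)

    generate(len(arr))
    return res

# heap_permutations([1, 2, 3]) yields all 3! orderings:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]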
| 69 | 0 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _A ( A__ , A__ , A__ = 1 / sqrt(2 ) ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = (1 - _cos) / 2
__lowercase = 1 - _cos
__lowercase = 1 + alpha
__lowercase = -2 * _cos
__lowercase = 1 - alpha
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ = 1 / sqrt(2 ) ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = (1 + _cos) / 2
__lowercase = -1 - _cos
__lowercase = 1 + alpha
__lowercase = -2 * _cos
__lowercase = 1 - alpha
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ = 1 / sqrt(2 ) ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = _sin / 2
__lowercase = 0
__lowercase = -ba
__lowercase = 1 + alpha
__lowercase = -2 * _cos
__lowercase = 1 - alpha
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ = 1 / sqrt(2 ) ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = 1 - alpha
__lowercase = -2 * _cos
__lowercase = 1 + alpha
__lowercase = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ , A__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = 10 ** (gain_db / 40)
__lowercase = 1 + alpha * big_a
__lowercase = -2 * _cos
__lowercase = 1 - alpha * big_a
__lowercase = 1 + alpha / big_a
__lowercase = -2 * _cos
__lowercase = 1 - alpha / big_a
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ , A__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = 10 ** (gain_db / 40)
__lowercase = (big_a + 1) - (big_a - 1) * _cos
__lowercase = (big_a + 1) + (big_a - 1) * _cos
__lowercase = (big_a - 1) - (big_a + 1) * _cos
__lowercase = (big_a - 1) + (big_a + 1) * _cos
__lowercase = 2 * sqrt(A__ ) * alpha
__lowercase = big_a * (pmc + aaa)
__lowercase = 2 * big_a * mpc
__lowercase = big_a * (pmc - aaa)
__lowercase = ppmc + aaa
__lowercase = -2 * pmpc
__lowercase = ppmc - aaa
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _A ( A__ , A__ , A__ , A__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
__lowercase = tau * frequency / samplerate
__lowercase = sin(A__ )
__lowercase = cos(A__ )
__lowercase = _sin / (2 * q_factor)
__lowercase = 10 ** (gain_db / 40)
__lowercase = (big_a + 1) - (big_a - 1) * _cos
__lowercase = (big_a + 1) + (big_a - 1) * _cos
__lowercase = (big_a - 1) - (big_a + 1) * _cos
__lowercase = (big_a - 1) + (big_a + 1) * _cos
__lowercase = 2 * sqrt(A__ ) * alpha
__lowercase = big_a * (ppmc + aaa)
__lowercase = -2 * big_a * pmpc
__lowercase = big_a * (ppmc - aaa)
__lowercase = pmc + aaa
__lowercase = 2 * mpc
__lowercase = pmc - aaa
__lowercase = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
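# The helpers above fill each biquad with the standard RBJ "Audio EQ Cookbook"
# coefficients (a well-known reference, noted here for context); the resulting
# second-order transfer function is
#     H(z) = (b0 + b1*z**-1 + b2*z**-2) / (a0 + a1*z**-1 + a2*z**-2)
# For example, a 1 kHz low-pass biquad for 48 kHz audio would be built as
#     filt = make_lowpass(1_000 , 48_000 )
# assuming the first mangled `_A` above is the low-pass constructor (the name
# make_lowpass follows TheAlgorithms' audio_filters module, which this mirrors).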
| 52 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def _A ( A__ ):
"""simple docstring"""
for i in range(0 , A__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def _A ( A__ ):
"""simple docstring"""
for i in range(A__ , 0 , -1 ):
for _ in range(A__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def _A ( A__ ):
"""simple docstring"""
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(A__ ) # upper half
reverse_floyd(A__ ) # lower half
if __name__ == "__main__":
print(R'''| /\ | |- | |- |--| |\ /| |-''')
print(R'''|/ \| |- |_ |_ |__| | \/ | |_''')
lowerCAmelCase__ = 1
while K:
    lowerCAmelCase__ = int(input('''enter the number and see the magic : '''))
print()
pretty_print(user_number)
lowerCAmelCase__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
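# Intended output of pretty_print(3), traced from the logic above (floyd()
# prints the upper pyramid, reverse_floyd() mirrors it into a diamond):
#       *
#      * *
#     * * *
#     * * *
#      * *
#       *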
| 52 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __magic_name__ ( lowercase , lowercase , lowercase=8 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE_: Tuple =width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class a ( UpperCAmelCase__ ):
def __init__( self : int , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : VQModel , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , movq=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[int] =2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
if latents is None:
SCREAMING_SNAKE_CASE_: Tuple =randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
SCREAMING_SNAKE_CASE_: List[str] =latents.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[int]=0 ) -> Dict:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE_: Tuple =torch.device(f'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE_: List[Any] =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : List[str]=0 ) -> Optional[int]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
SCREAMING_SNAKE_CASE_: Dict =torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE_: Optional[int] =None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =cpu_offload_with_hook(lowerCAmelCase , lowerCAmelCase , prev_module_hook=lowerCAmelCase )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE_: Dict =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase )
def __call__( self : Optional[Any] , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 100 , lowerCAmelCase : float = 4.0 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self._execution_device
SCREAMING_SNAKE_CASE_: Dict =guidance_scale > 1.0
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =torch.cat(lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =torch.cat(lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_: Dict =image_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_: str =negative_image_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
SCREAMING_SNAKE_CASE_: Dict =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase )
self.scheduler.set_timesteps(lowerCAmelCase , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.scheduler.timesteps
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.unet.config.in_channels
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =downscale_height_and_width(lowerCAmelCase , lowerCAmelCase , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE_: Any =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_: List[Any] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_: int ={"""image_embeds""": image_embeds}
SCREAMING_SNAKE_CASE_: Optional[Any] =self.unet(
sample=lowerCAmelCase , timestep=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , added_cond_kwargs=lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE_: int =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE_: Optional[Any] =self.scheduler.step(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase , )[0]
# post-processing
SCREAMING_SNAKE_CASE_: Optional[Any] =self.movq.decode(lowerCAmelCase , force_not_quantize=lowerCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =image * 0.5 + 0.5
SCREAMING_SNAKE_CASE_: Dict =image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_: Tuple =self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
| 173 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Dict = 'vit_mae'
def __init__( self : int , lowerCAmelCase : int=768 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Any=12 , lowerCAmelCase : Union[str, Any]=3072 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : str=0.0 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : int=1E-12 , lowerCAmelCase : Optional[Any]=224 , lowerCAmelCase : Optional[Any]=16 , lowerCAmelCase : Any=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Any=16 , lowerCAmelCase : str=512 , lowerCAmelCase : int=8 , lowerCAmelCase : Union[str, Any]=2048 , lowerCAmelCase : Tuple=0.7_5 , lowerCAmelCase : str=False , **lowerCAmelCase : Tuple , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =intermediate_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE_: Tuple =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =initializer_range
SCREAMING_SNAKE_CASE_: int =layer_norm_eps
SCREAMING_SNAKE_CASE_: List[str] =image_size
SCREAMING_SNAKE_CASE_: Dict =patch_size
SCREAMING_SNAKE_CASE_: str =num_channels
SCREAMING_SNAKE_CASE_: List[str] =qkv_bias
SCREAMING_SNAKE_CASE_: List[str] =decoder_num_attention_heads
SCREAMING_SNAKE_CASE_: Any =decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[Any] =decoder_num_hidden_layers
SCREAMING_SNAKE_CASE_: str =decoder_intermediate_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =mask_ratio
SCREAMING_SNAKE_CASE_: List[str] =norm_pix_loss
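# Instantiation sketch: the class above mirrors transformers' ViTMAEConfig, so
# with the real class the defaults shown in __init__ apply (an illustration,
# not taken from this file):
#
#     from transformers import ViTMAEConfig
#     config = ViTMAEConfig()
#     assert config.mask_ratio == 0.75 and config.patch_size == 16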
| 173 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCamelCase :
'''simple docstring'''
lowercase : Any =PegasusConfig
lowercase : Any ={}
lowercase : int ="""gelu"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=40 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
lowercase_ :Union[str, Any] = parent
lowercase_ :Tuple = batch_size
lowercase_ :Optional[Any] = seq_length
lowercase_ :Any = is_training
lowercase_ :Optional[int] = use_labels
lowercase_ :Optional[int] = vocab_size
lowercase_ :Optional[Any] = hidden_size
lowercase_ :List[Any] = num_hidden_layers
lowercase_ :Tuple = num_attention_heads
lowercase_ :Optional[Any] = intermediate_size
lowercase_ :List[Any] = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :Any = max_position_embeddings
lowercase_ :Any = eos_token_id
lowercase_ :List[str] = pad_token_id
lowercase_ :Optional[Any] = bos_token_id
def UpperCamelCase ( self ):
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ :Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ :int = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ :Optional[int] = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[Any] = TFPegasusModel(config=UpperCamelCase_ ).get_decoder()
lowercase_ :Tuple = inputs_dict['''input_ids''']
lowercase_ :Optional[int] = input_ids[:1, :]
lowercase_ :Tuple = inputs_dict['''attention_mask'''][:1, :]
lowercase_ :List[str] = inputs_dict['''head_mask''']
lowercase_ :int = 1
# first forward pass
lowercase_ :Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
lowercase_ , lowercase_ :Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase_ :List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ :Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
lowercase_ :Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase_ :Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase_ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
lowercase_ :str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase_ :Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase_ :Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowercase_ :Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def UpperCamelCase ( _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Optional[int]:
'''simple docstring'''
if attention_mask is None:
lowercase_ :Dict = tf.cast(tf.math.not_equal(_a , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
lowercase_ :Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
lowercase_ :List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ :Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ :Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
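# Hedged illustration of the masking convention used by the helper above:
# positions equal to pad_token_id get mask 0, all other positions get 1
# (a pad id of 0 is assumed here purely for the demo).
import tensorflow as tf
ids = tf.constant([[5, 6, 0, 0]])
mask = tf.cast(tf.math.not_equal(ids, 0), tf.int8)
print(mask.numpy())  # [[1 1 0 0]]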
@require_tf
class UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Tuple =(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowercase : List[str] =(TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowercase : str =(
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase : Optional[int] =True
lowercase : List[str] =False
lowercase : Union[str, Any] =False
def UpperCamelCase ( self ):
lowercase_ :Dict = TFPegasusModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=UpperCamelCase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : Tuple =[
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase : Optional[int] =[
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowercase : Optional[Any] ="""google/pegasus-xsum"""
@cached_property
def UpperCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Any = self.translate_src_text(**UpperCamelCase_ )
assert self.expected_text == generated_words
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Dict = self.tokenizer(self.src_text , **UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''tf''' )
lowercase_ :int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , )
lowercase_ :int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )
return generated_words
@slow
def UpperCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 252 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE : Dict = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCamelCase ( _a , _a=None ) -> Optional[int]:
'''simple docstring'''
require_version(deps[pkg] , _a )
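# A small sketch of the per-package check performed above, using the public
# helper path (an assumption) with an illustrative version spec.
from transformers.utils.versions import require_version
require_version("numpy>=1.17")  # raises if numpy is missing or too old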
| 252 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ :Tuple = logging.get_logger(__name__)
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] =["""input_ids""", """attention_mask"""]
def __init__( self , lowerCamelCase__="</s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__=125 , lowerCamelCase__=None , **lowerCamelCase__ , ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
__UpperCamelCase : Optional[int] =[f'<extra_id_{i}>' for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__UpperCamelCase : int =len(set(filter(lambda lowerCamelCase__ : bool('extra_id' in str(lowerCamelCase__ ) ) , lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
__UpperCamelCase : List[str] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
__UpperCamelCase : int =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
__UpperCamelCase : str =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , extra_ids=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
__UpperCamelCase : List[str] =extra_ids
__UpperCamelCase : List[Any] =2**8 # utf is 8 bits
# define special tokens dict
__UpperCamelCase : Dict[str, int] ={
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__UpperCamelCase : Tuple =len(self.special_tokens_encoder )
__UpperCamelCase : List[str] =len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
__UpperCamelCase : Optional[int] =self.vocab_size + i - n
__UpperCamelCase : Dict[int, str] ={v: k for k, v in self.special_tokens_encoder.items()}
@property
def __lowercase ( self ):
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowerCamelCase__ )) + [1]
return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : Any =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =self._add_eos_if_not_present(lowerCamelCase__ )
if token_ids_a is None:
return token_ids_a
else:
__UpperCamelCase : List[Any] =self._add_eos_if_not_present(lowerCamelCase__ )
return token_ids_a + token_ids_a
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =[chr(lowerCamelCase__ ) for i in text.encode('utf-8' )]
return tokens
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if token in self.special_tokens_encoder:
__UpperCamelCase : Any =self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__UpperCamelCase : List[str] =self.added_tokens_encoder[token]
elif len(lowerCamelCase__ ) != 1:
__UpperCamelCase : List[Any] =self.unk_token_id
else:
__UpperCamelCase : List[str] =ord(lowerCamelCase__ ) + self._num_special_tokens
return token_id
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if index in self.special_tokens_decoder:
__UpperCamelCase : str =self.special_tokens_decoder[index]
else:
__UpperCamelCase : int =chr(index - self._num_special_tokens )
return token
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =b''
for token in tokens:
if token in self.special_tokens_decoder:
__UpperCamelCase : List[str] =self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.added_tokens_decoder:
__UpperCamelCase : List[Any] =self.special_tokens_decoder[token].encode('utf-8' )
elif token in self.special_tokens_encoder:
__UpperCamelCase : str =token.encode('utf-8' )
elif token in self.added_tokens_encoder:
__UpperCamelCase : List[Any] =token.encode('utf-8' )
else:
__UpperCamelCase : Union[str, Any] =bytes([ord(lowerCamelCase__ )] )
bstring += tok_string
__UpperCamelCase : str =bstring.decode('utf-8' , errors='ignore' )
return string
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
return ()
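# A pure-Python sketch of the byte-level encoding implemented above: every
# UTF-8 byte maps to one id, shifted by the 3 special tokens (pad/eos/unk).
text = "hi"
offset = 3  # size of the special-tokens dict defined in __init__
ids = [b + offset for b in text.encode("utf-8")]
print(ids)  # [107, 108]  ('h' == 104, 'i' == 105)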
| 71 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Any = logging.get_logger(__name__)
A_ :int = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A ( a ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] ="""vit_msn"""
def __init__( self , lowerCamelCase__=768 , lowerCamelCase__=12 , lowerCamelCase__=12 , lowerCamelCase__=3072 , lowerCamelCase__="gelu" , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=1E-06 , lowerCamelCase__=224 , lowerCamelCase__=16 , lowerCamelCase__=3 , lowerCamelCase__=True , **lowerCamelCase__ , ):
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
__UpperCamelCase : int =hidden_size
__UpperCamelCase : List[Any] =num_hidden_layers
__UpperCamelCase : Union[str, Any] =num_attention_heads
__UpperCamelCase : List[str] =intermediate_size
__UpperCamelCase : Union[str, Any] =hidden_act
__UpperCamelCase : str =hidden_dropout_prob
__UpperCamelCase : Union[str, Any] =attention_probs_dropout_prob
__UpperCamelCase : Union[str, Any] =initializer_range
__UpperCamelCase : Tuple =layer_norm_eps
__UpperCamelCase : Optional[Any] =image_size
__UpperCamelCase : Optional[int] =patch_size
__UpperCamelCase : Any =num_channels
__UpperCamelCase : str =qkv_bias
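# Quick arithmetic implied by the config defaults above: a 224x224 input
# with 16x16 patches yields (224 // 16) ** 2 = 196 patch embeddings.
image_size, patch_size = 224, 16
print((image_size // patch_size) ** 2)  # 196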
| 71 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=400 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=False , ):
lowercase : str = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase : Tuple = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase : List[Any] = parent
lowercase : List[str] = batch_size
lowercase : str = num_channels
lowercase : List[Any] = image_size
lowercase : List[str] = min_resolution
lowercase : Optional[Any] = max_resolution
lowercase : Dict = do_resize
lowercase : List[str] = size
lowercase : Dict = do_center_crop
lowercase : int = crop_size
lowercase : int = do_normalize
lowercase : Dict = image_mean
lowercase : List[str] = image_std
lowercase : List[str] = do_reduce_labels
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __lowercase ( ) ->Any:
"""simple docstring"""
lowercase : List[Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
lowercase : Optional[Any] = Image.open(dataset[0]['''file'''] )
lowercase : int = Image.open(dataset[1]['''file'''] )
return image, map
def __lowercase ( ) ->Optional[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
lowercase : Any = Image.open(ds[0]['''file'''] )
lowercase : List[str] = Image.open(ds[1]['''file'''] )
lowercase : Optional[int] = Image.open(ds[2]['''file'''] )
lowercase : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
A : str = BeitImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = BeitImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
lowercase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_normalize''' ) )
self.assertTrue(hasattr(a_ , '''image_mean''' ) )
self.assertTrue(hasattr(a_ , '''image_std''' ) )
def __lowerCamelCase ( self ):
lowercase : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , a_ )
lowercase : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , a_ )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase : Any = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCamelCase ( self ):
lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCamelCase ( self ):
lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
lowercase : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase : Any = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCamelCase ( self ):
lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
lowercase : Dict = []
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowercase : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
lowercase : str = image_processing(a_ , a_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
lowercase : List[Any] = prepare_semantic_single_inputs()
lowercase : List[str] = image_processing(a_ , a_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
lowercase : Any = prepare_semantic_batch_inputs()
lowercase : Dict = image_processing(a_ , a_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def __lowerCamelCase ( self ):
lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowercase : Dict = prepare_semantic_single_inputs()
lowercase : str = image_processing(a_ , a_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
lowercase : int = True
lowercase : Any = image_processing(a_ , a_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
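# Hedged sketch of the label convention these tests assert: with
# do_reduce_labels enabled, background class 0 is remapped to the ignore
# index 255 and the remaining class ids shift down by one.
import numpy as np
labels = np.array([0, 1, 150])
reduced = labels - 1
reduced[labels == 0] = 255
print(reduced)  # [255   0 149]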
| 352 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowercase ( _UpperCamelCase, _UpperCamelCase=0.9_9_9, _UpperCamelCase="cosine", ) ->Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowercase : List[str] = []
for i in range(_UpperCamelCase ):
lowercase : List[str] = i / num_diffusion_timesteps
lowercase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCamelCase ) / alpha_bar_fn(_UpperCamelCase ), _UpperCamelCase ) )
return torch.tensor(_UpperCamelCase, dtype=torch.float32 )
class __SCREAMING_SNAKE_CASE ( A__ , A__ ):
A : Any = [e.name for e in KarrasDiffusionSchedulers]
A : Dict = 2
@register_to_config
def __init__( self , SCREAMING_SNAKE_CASE__ = 1000 , SCREAMING_SNAKE_CASE__ = 0.00085 , SCREAMING_SNAKE_CASE__ = 0.012 , SCREAMING_SNAKE_CASE__ = "linear" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "epsilon" , SCREAMING_SNAKE_CASE__ = "linspace" , SCREAMING_SNAKE_CASE__ = 0 , ):
if trained_betas is not None:
lowercase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.float32 )
elif beta_schedule == "linear":
lowercase : Union[str, Any] = torch.linspace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE__ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase : str = betas_for_alpha_bar(SCREAMING_SNAKE_CASE__ )
else:
raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" )
lowercase : Optional[int] = 1.0 - self.betas
lowercase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if schedule_timesteps is None:
lowercase : Union[str, Any] = self.timesteps
lowercase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase : List[Any] = 1 if len(SCREAMING_SNAKE_CASE__ ) > 1 else 0
else:
lowercase : int = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep
lowercase : Optional[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __lowerCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
lowercase : Optional[Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE__ )
if self.state_in_first_order:
lowercase : Any = self.sigmas[step_index]
else:
lowercase : Optional[int] = self.sigmas_interpol[step_index]
lowercase : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ):
lowercase : Any = num_inference_steps
lowercase : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase : Dict = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Union[str, Any] = (np.arange(0 , SCREAMING_SNAKE_CASE__ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase : Dict = (np.arange(SCREAMING_SNAKE_CASE__ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE__ )
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
lowercase : int = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = np.interp(SCREAMING_SNAKE_CASE__ , np.arange(0 , len(SCREAMING_SNAKE_CASE__ ) ) , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
lowercase : str = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ )
# interpolate sigmas
lowercase : int = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
lowercase : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
lowercase : Optional[int] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
# mps does not support float64
lowercase : Any = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=torch.float32 )
else:
lowercase : List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
# interpolate timesteps
lowercase : Any = self.sigma_to_t(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ , dtype=timesteps.dtype )
lowercase : List[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
lowercase : Dict = torch.cat([timesteps[:1], interleaved_timesteps] )
lowercase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase : Dict = defaultdict(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
# get log sigma
lowercase : Any = sigma.log()
# get distribution
lowercase : Optional[Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
lowercase : List[Any] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
lowercase : str = low_idx + 1
lowercase : Union[str, Any] = self.log_sigmas[low_idx]
lowercase : Union[str, Any] = self.log_sigmas[high_idx]
# interpolate sigmas
lowercase : Dict = (low - log_sigma) / (low - high)
lowercase : Union[str, Any] = w.clamp(0 , 1 )
# transform interpolation to time range
lowercase : List[str] = (1 - w) * low_idx + w * high_idx
lowercase : Tuple = t.view(sigma.shape )
return t
@property
def __lowerCamelCase ( self ):
return self.sample is None
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True , ):
lowercase : Optional[Any] = self.index_for_timestep(SCREAMING_SNAKE_CASE__ )
# advance index counter by 1
lowercase : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase : Union[str, Any] = self.sigmas[step_index]
lowercase : List[Any] = self.sigmas_interpol[step_index + 1]
lowercase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
lowercase : Any = self.sigmas[step_index - 1]
lowercase : List[Any] = self.sigmas_interpol[step_index]
lowercase : Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase : Union[str, Any] = 0
lowercase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol
lowercase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase : Union[str, Any] = sigma_interpol - sigma_hat
# store for 2nd order step
lowercase : Optional[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
lowercase : str = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
lowercase : str = sigma_next - sigma_hat
lowercase : List[str] = self.sample
lowercase : Optional[int] = None
lowercase : str = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE__ ):
# mps does not support float64
lowercase : Tuple = self.timesteps.to(original_samples.device , dtype=torch.float32 )
lowercase : List[Any] = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
lowercase : List[str] = self.timesteps.to(original_samples.device )
lowercase : Any = timesteps.to(original_samples.device )
lowercase : Tuple = [self.index_for_timestep(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for t in timesteps]
lowercase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase : Any = sigma.unsqueeze(-1 )
lowercase : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
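# Numeric check of the sigma construction in set_timesteps above:
# sigma = sqrt((1 - alphas_cumprod) / alphas_cumprod).
import torch
alphas_cumprod = torch.tensor([0.99, 0.5, 0.1])
print(((1 - alphas_cumprod) / alphas_cumprod) ** 0.5)  # ~[0.1005, 1.0, 3.0]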
| 173 | 0 |
def A_ ( A__ , A__ ) -> None:
a__ : str = len(A__ )
print('The following activities are selected:' )
# The first activity is always selected
a__ : Dict = 0
print(A__ , end=',' )
# Consider rest of the activities
for j in range(A__ ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(A__ , end=',' )
a__ : int = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Tuple = [1, 3, 0, 5, 8, 5]
lowercase : Optional[int] = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
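# Note: the greedy scan above assumes activities arrive sorted by finish
# time (as in the demo lists). A sketch of the sort step for arbitrary input:
start = [5, 1, 3]
finish = [9, 2, 4]
order = sorted(range(len(start)), key=lambda i: finish[i])
print(order)  # [1, 2, 0] -- scan activities in this order before the greedy rule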
| 99 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowercase : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase , scheduler=lowercase)
@torch.no_grad()
def __call__( self , lowercase = 1 , lowercase = 100 , lowercase = None , lowercase = None , lowercase = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
a__ : Optional[int] = self.unet.config.sample_size / self.unet.config.sample_rate
a__ : int = audio_length_in_s * self.unet.config.sample_rate
a__ : Union[str, Any] = 2 ** len(self.unet.up_blocks)
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.')
a__ : str = int(lowercase)
if sample_size % down_scale_factor != 0:
a__ : List[str] = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
' process.')
a__ : List[Any] = int(lowercase)
a__ : int = next(iter(self.unet.parameters())).dtype
a__ : Tuple = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=self.device , dtype=lowercase)
# set step values
self.scheduler.set_timesteps(lowercase , device=audio.device)
a__ : Union[str, Any] = self.scheduler.timesteps.to(lowercase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
a__ : Dict = self.unet(lowercase , lowercase).sample
# 2. compute previous image: x_t -> t_t-1
a__ : Any = self.scheduler.step(lowercase , lowercase , lowercase).prev_sample
a__ : str = audio.clamp(-1 , 1).float().cpu().numpy()
a__ : List[Any] = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase)
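# Hedged usage sketch, assuming this pipeline is exported as
# diffusers.DanceDiffusionPipeline; the checkpoint name is illustrative.
from diffusers import DanceDiffusionPipeline
pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]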
| 99 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[Any] = checkpoint
_a : List[Any] = {}
_a : Tuple = vae_state_dict["""encoder.conv_in.weight"""]
_a : str = vae_state_dict["""encoder.conv_in.bias"""]
_a : Tuple = vae_state_dict["""encoder.conv_out.weight"""]
_a : Dict = vae_state_dict["""encoder.conv_out.bias"""]
_a : List[Any] = vae_state_dict["""encoder.norm_out.weight"""]
_a : Dict = vae_state_dict["""encoder.norm_out.bias"""]
_a : Tuple = vae_state_dict["""decoder.conv_in.weight"""]
_a : Union[str, Any] = vae_state_dict["""decoder.conv_in.bias"""]
_a : Any = vae_state_dict["""decoder.conv_out.weight"""]
_a : Optional[Any] = vae_state_dict["""decoder.conv_out.bias"""]
_a : Any = vae_state_dict["""decoder.norm_out.weight"""]
_a : str = vae_state_dict["""decoder.norm_out.bias"""]
_a : Any = vae_state_dict["""quant_conv.weight"""]
_a : Optional[Any] = vae_state_dict["""quant_conv.bias"""]
_a : Union[str, Any] = vae_state_dict["""post_quant_conv.weight"""]
_a : Dict = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
_a : int = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
_a : Tuple = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ )
}
# Retrieves the keys for the decoder up blocks only
_a : Any = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
_a : Optional[Any] = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(UpperCamelCase__ )
}
for i in range(UpperCamelCase__ ):
_a : str = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
_a : Any = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
_a : Union[str, Any] = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
_a : Any = renew_vae_resnet_paths(UpperCamelCase__ )
_a : Any = {"""old""": F"""down.{i}.block""", """new""": F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Dict = [key for key in vae_state_dict if """encoder.mid.block""" in key]
_a : Optional[int] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : str = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
_a : Tuple = renew_vae_resnet_paths(UpperCamelCase__ )
_a : Any = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Optional[int] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
_a : str = renew_vae_attention_paths(UpperCamelCase__ )
_a : List[Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
conv_attn_to_linear(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
_a : Union[str, Any] = num_up_blocks - 1 - i
_a : Union[str, Any] = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
_a : Union[str, Any] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
_a : Optional[int] = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
_a : int = renew_vae_resnet_paths(UpperCamelCase__ )
_a : int = {"""old""": F"""up.{block_id}.block""", """new""": F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : int = [key for key in vae_state_dict if """decoder.mid.block""" in key]
_a : List[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_a : int = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
_a : Dict = renew_vae_resnet_paths(UpperCamelCase__ )
_a : List[str] = {"""old""": F"""mid.block_{i}""", """new""": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
_a : Any = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
_a : str = renew_vae_attention_paths(UpperCamelCase__ )
_a : str = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ )
conv_attn_to_linear(UpperCamelCase__ )
return new_checkpoint
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , ):
'''simple docstring'''
_a : List[Any] = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
_a : Dict = io.BytesIO(r.content )
_a : Dict = OmegaConf.load(UpperCamelCase__ )
_a : Union[str, Any] = 5_1_2
_a : Tuple = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
_a : str = {}
with safe_open(UpperCamelCase__ , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
_a : List[Any] = f.get_tensor(UpperCamelCase__ )
else:
_a : str = torch.load(UpperCamelCase__ , map_location=UpperCamelCase__ )["""state_dict"""]
# Convert the VAE model.
_a : List[Any] = create_vae_diffusers_config(UpperCamelCase__ , image_size=UpperCamelCase__ )
_a : Tuple = custom_convert_ldm_vae_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
_a : Optional[Any] = AutoencoderKL(**UpperCamelCase__ )
vae.load_state_dict(UpperCamelCase__ )
vae.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE to.')
_snake_case = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
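# Example invocation of the script above (the script file name and paths
# are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers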
| 353 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : Dict = ['''pixel_values''']
def __init__( self : Any , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[Any]=PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : List[str] , ) -> None:
_a : int = do_resize
_a : Union[str, Any] = do_rescale
_a : Any = size_divisor
_a : Any = resample
super().__init__(**UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[Any] ) -> np.ndarray:
_a , _a : Tuple = get_image_size(UpperCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
_a : Optional[Any] = height // size_divisor * size_divisor
_a : Union[str, Any] = width // size_divisor * size_divisor
_a : Any = resize(UpperCAmelCase__ , (new_h, new_w) , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
return image
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : float , UpperCAmelCase__ : Optional[ChannelDimension] = None , **UpperCAmelCase__ : Optional[int] ) -> np.ndarray:
return rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[TensorType, str]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> BatchFeature:
_a : Dict = do_resize if do_resize is not None else self.do_resize
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : str = size_divisor if size_divisor is not None else self.size_divisor
_a : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("""size_divisor is required for resizing""" )
_a : List[str] = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError("""Invalid image(s)""" )
# All transformations expect numpy arrays.
_a : Tuple = [to_numpy_array(UpperCAmelCase__ ) for img in images]
if do_resize:
_a : Optional[int] = [self.resize(UpperCAmelCase__ , size_divisor=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
_a : str = [self.rescale(UpperCAmelCase__ , scale=1 / 255 ) for image in images]
_a : Any = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
_a : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
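# The floor-to-multiple rule in the resize method above, worked through with
# illustrative dimensions and the default size_divisor of 32:
height, width, size_divisor = 481, 640, 32
print(height // size_divisor * size_divisor, width // size_divisor * size_divisor)  # 480 640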
| 324 | 0 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def snake_case__ ( _A: Optional[Any] , _A: List[Any] , _A: Union[str, Any] , _A: str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
lowerCAmelCase = f"{src_lang}-{tgt_lang}"
lowerCAmelCase = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=_A , exist_ok=_A )
lowerCAmelCase = os.path.join(_A , """README.md""" )
print(f"Generating {path}" )
with open(_A , """w""" , encoding="""utf-8""" ) as f:
f.write(_A )
# make sure we are under the root of the project
__lowercase = Path(__file__).resolve().parent.parent.parent
__lowercase = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__lowercase = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
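Note that the card template relies on f-string brace escaping: every literal `{`/`}` in the BibTeX entry is doubled so that only placeholders such as `{model_name}` interpolate. A minimal illustration of that mechanic:

```python
model_name = "wmt16-en-de-dist-12-1"
# doubled braces survive as literal braces; single braces interpolate
entry = f"@misc{{kasai2020deep, note={{ported as {model_name}}}}}"
print(entry)  # @misc{kasai2020deep, note={ported as wmt16-en-de-dist-12-1}}
```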
| 272 | '''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = ['''input_features''']
def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
lowerCAmelCase = n_fft
lowerCAmelCase = hop_length
lowerCAmelCase = chunk_length
lowerCAmelCase = chunk_length * sampling_rate
lowerCAmelCase = self.n_samples // hop_length
lowerCAmelCase = sampling_rate
lowerCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCAmelCase = log_spec[:, :-1]
lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0)
lowerCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0):
"""simple docstring"""
if attention_mask is not None:
lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa)
lowerCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)):
lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7)
if length < normed_slice.shape[0]:
lowerCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase)
else:
lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values]
return normed_input_values
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""")
lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
lowerCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray):
lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa)
elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
lowerCAmelCase = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
lowerCAmelCase = [np.asarray([raw_speech]).T]
lowerCAmelCase = BatchFeature({"""input_features""": raw_speech})
# convert into correct format for padding
lowerCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0)
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1)
lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase):
lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features]
else:
lowerCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase)
return padded_inputs
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = copy.deepcopy(self.__dict__)
lowerCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
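What the spectrogram helper above computes is a log-mel spectrogram: a Hann-windowed power STFT with the defaults shown (n_fft=400, hop=160 at 16 kHz), projected onto 80 mel filters up to 8 kHz, log10-compressed, clamped to 8 decades below the maximum, then affinely rescaled. Below is a self-contained numpy sketch of that pipeline; the simplified triangular filter bank only approximates the Slaney-normalized one that `mel_filter_bank` builds.

```python
import numpy as np

def log_mel(waveform, sr=16000, n_fft=400, hop=160, n_mels=80, fmax=8000.0):
    # Hann-windowed power spectrogram (np.hanning is the symmetric window,
    # close enough for a sketch).
    window = np.hanning(n_fft)
    frames = [
        np.abs(np.fft.rfft(waveform[s : s + n_fft] * window)) ** 2
        for s in range(0, len(waveform) - n_fft + 1, hop)
    ]
    spec = np.array(frames).T  # (1 + n_fft // 2, num_frames)

    # Simplified triangular mel filter bank from 0 Hz to fmax.
    def hz_to_mel(f):
        return 2595.0 * np.log10(1.0 + f / 700.0)

    def mel_to_hz(m):
        return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

    freqs = mel_to_hz(np.linspace(hz_to_mel(0.0), hz_to_mel(fmax), n_mels + 2))
    bins = np.floor((n_fft + 1) * freqs / sr).astype(int)
    fb = np.zeros((n_mels, 1 + n_fft // 2))
    for i in range(n_mels):
        left, center, right = bins[i], bins[i + 1], bins[i + 2]
        fb[i, left:center] = (np.arange(left, center) - left) / max(center - left, 1)
        fb[i, center:right] = (right - np.arange(center, right)) / max(right - center, 1)

    log_spec = np.log10(np.maximum(fb @ spec, 1e-10))
    log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp dynamic range
    return (log_spec + 4.0) / 4.0                          # affine rescale

audio = np.sin(2 * np.pi * 440.0 * np.arange(16000) / 16000).astype(np.float32)
print(log_mel(audio).shape)  # (80, 98)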
| 272 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( UpperCAmelCase_ ):
A__ : Optional[Any] = (DDIMParallelScheduler,)
A__ : List[Any] = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def lowerCamelCase_ ( self , **__UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__lowercase )
return config
def lowerCamelCase_ ( self , **__UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(**__lowercase )
UpperCamelCase_ = scheduler_class(**__lowercase )
UpperCamelCase_ = 1_0, 0.0
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for t in scheduler.timesteps:
UpperCamelCase_ = model(__lowercase , __lowercase )
UpperCamelCase_ = scheduler.step(__lowercase , __lowercase , __lowercase , __lowercase ).prev_sample
return sample
def lowerCamelCase_ ( self ):
"""simple docstring"""
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowercase )
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase_ = scheduler_class(**__lowercase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowercase , beta_end=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowercase , prediction_type=__lowercase , sample_max_value=__lowercase , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=__lowercase , num_inference_steps=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowercase , eta=__lowercase )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__lowercase )
UpperCamelCase_ = 1_0, 0.0
scheduler.set_timesteps(__lowercase )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter
UpperCamelCase_ = self.dummy_sample_deter + 0.1
UpperCamelCase_ = self.dummy_sample_deter - 0.1
UpperCamelCase_ = samplea.shape[0]
UpperCamelCase_ = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase_ = torch.arange(__lowercase )[0:3, None].repeat(1 , __lowercase )
UpperCamelCase_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase_ = scheduler.batch_step_no_noise(__lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __lowercase )
UpperCamelCase_ = torch.sum(torch.abs(__lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.full_loop()
UpperCamelCase_ = torch.sum(torch.abs(__lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.full_loop(prediction_type="""v_prediction""" )
UpperCamelCase_ = torch.sum(torch.abs(__lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.01 )
UpperCamelCase_ = torch.sum(torch.abs(__lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.full_loop(set_alpha_to_one=__lowercase , beta_start=0.01 )
UpperCamelCase_ = torch.sum(torch.abs(__lowercase ) )
UpperCamelCase_ = torch.mean(torch.abs(__lowercase ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
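The `_get_variance` constants asserted above follow from the standard DDIM posterior variance, `(1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)`, where `a` is the cumulative product of `1 - beta`. A numpy sketch that should land on the asserted values to the tested tolerance, assuming the config defaults above (linear betas from 1e-4 to 0.02 over 1000 steps):

```python
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)        # the "linear" schedule from the config
alphas_cumprod = np.cumprod(1.0 - betas)

def ddim_variance(t, prev_t):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else 1.0
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)

print(round(ddim_variance(420, 400), 5))  # ~0.14771, as asserted above
print(round(ddim_variance(980, 960), 5))  # ~0.32460
print(round(ddim_variance(999, 998), 5))  # ~0.02
```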
| 367 |
import math
def lowerCamelCase__ ( a__ : float , a__ : float ) -> float:
if (
not isinstance(a__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def lowerCamelCase__ ( a__ : float , a__ : float ) -> float:
if (
not isinstance(a__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
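The two functions are the real and reactive halves of the power triangle: P = S * pf and Q = S * sqrt(1 - pf^2). A quick worked check, restated under descriptive names:

```python
import math

def real_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * power_factor

def reactive_power(apparent_power: float, power_factor: float) -> float:
    return apparent_power * math.sqrt(1 - power_factor**2)

# 100 VA at power factor 0.8 is the classic 3-4-5 power triangle:
print(real_power(100, 0.8))      # 80.0 W
print(reactive_power(100, 0.8))  # ~60.0 VAR (up to float rounding)
```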
| 261 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = ProphetNetTokenizer
lowerCAmelCase_ = False
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
snake_case_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case__ ( self : Union[str, Any] , __lowercase : Any ):
"""simple docstring"""
snake_case_ = "UNwant\u00E9d,running"
snake_case_ = "unwanted, running"
return input_text, output_text
def snake_case__ ( self : Union[str, Any] ):
"""simple docstring"""
snake_case_ = self.tokenizer_class(self.vocab_file )
snake_case_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__lowercase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [9, 6, 7, 12, 10, 11] )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase , strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = BasicTokenizer(do_lower_case=__lowercase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
snake_case_ = {}
for i, token in enumerate(__lowercase ):
snake_case_ = i
snake_case_ = WordpieceTokenizer(vocab=__lowercase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
snake_case_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
snake_case_ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
snake_case_ = tokenizer(__lowercase , padding=__lowercase , return_tensors="pt" )
self.assertIsInstance(__lowercase , __lowercase )
snake_case_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def snake_case__ ( self : Dict ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def snake_case__ ( self : Optional[int] ):
"""simple docstring"""
snake_case_ = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
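The WordPiece behavior the tests pin down ('unwanted' becomes ['un', '##want', '##ed']; an unmatchable tail collapses the word to ['[UNK]']) is greedy longest-match-first subword splitting per whitespace-separated word. A minimal sketch of that loop, ignoring the per-word length cap the real tokenizer also applies:

```python
def wordpiece(word, vocab, unk="[UNK]"):
    """Greedy longest-match-first subword split, as WordpieceTokenizer does."""
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatchable span marks the whole word unknown
        tokens.append(cur)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']
```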
| 187 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _A , _A , _A , _A , _A=True , _A="pt" ):
'''simple docstring'''
snake_case_ = {"add_prefix_space": True} if isinstance(_A , _A ) and not line.startswith(" " ) else {}
snake_case_ = padding_side
return tokenizer(
[line] , max_length=_A , padding="max_length" if pad_to_max_length else None , truncation=_A , return_tensors=_A , add_special_tokens=_A , **_A , )
def lowerCamelCase__ ( _A , _A , _A=None , ):
'''simple docstring'''
snake_case_ = input_ids.ne(_A ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : str , __lowercase : Tuple="train" , __lowercase : List[str]=None , __lowercase : List[Any]=None , __lowercase : Optional[Any]=None , __lowercase : Union[str, Any]="" , ):
"""simple docstring"""
super().__init__()
snake_case_ = Path(__lowercase ).joinpath(type_path + ".source" )
snake_case_ = Path(__lowercase ).joinpath(type_path + ".target" )
snake_case_ = self.get_char_lens(self.src_file )
snake_case_ = max_source_length
snake_case_ = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
snake_case_ = tokenizer
snake_case_ = prefix
if n_obs is not None:
snake_case_ = self.src_lens[:n_obs]
snake_case_ = src_lang
snake_case_ = tgt_lang
def __len__( self : List[Any] ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : List[Any] , __lowercase : Dict ):
"""simple docstring"""
snake_case_ = index + 1 # linecache starts at 1
snake_case_ = self.prefix + linecache.getline(str(self.src_file ) , __lowercase ).rstrip("\n" )
snake_case_ = linecache.getline(str(self.tgt_file ) , __lowercase ).rstrip("\n" )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , __lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case_ = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
)
snake_case_ = self.tokenizer.generator if isinstance(self.tokenizer , __lowercase ) else self.tokenizer
snake_case_ = encode_line(__lowercase , __lowercase , self.max_source_length , "right" )
snake_case_ = encode_line(__lowercase , __lowercase , self.max_target_length , "right" )
snake_case_ = source_inputs["input_ids"].squeeze()
snake_case_ = target_inputs["input_ids"].squeeze()
snake_case_ = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def snake_case__ ( __lowercase : Optional[int] ):
"""simple docstring"""
return [len(__lowercase ) for x in Path(__lowercase ).open().readlines()]
def snake_case__ ( self : Dict , __lowercase : Union[str, Any] ):
"""simple docstring"""
snake_case_ = torch.stack([x["input_ids"] for x in batch] )
snake_case_ = torch.stack([x["attention_mask"] for x in batch] )
snake_case_ = torch.stack([x["decoder_input_ids"] for x in batch] )
snake_case_ = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , __lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , __lowercase )
else self.tokenizer.pad_token_id
)
snake_case_ = trim_batch(__lowercase , __lowercase )
snake_case_ , snake_case_ = trim_batch(__lowercase , __lowercase , attention_mask=__lowercase )
snake_case_ = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
lowercase__ : str = getLogger(__name__)
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_A ) )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = get_git_info()
save_json(_A , os.path.join(_A , "git_log.json" ) )
def lowerCamelCase__ ( _A , _A , _A=4 , **_A ):
'''simple docstring'''
with open(_A , "w" ) as f:
json.dump(_A , _A , indent=_A , **_A )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
with open(_A ) as f:
return json.load(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = git.Repo(search_parent_directories=_A )
snake_case_ = {
"repo_id": str(_A ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
return list(map(_A , _A ) )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
with open(_A , "wb" ) as f:
return pickle.dump(_A , _A )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
def remove_articles(_A ):
return re.sub(R"\b(a|an|the)\b" , " " , _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
snake_case_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = normalize_answer(_A ).split()
snake_case_ = normalize_answer(_A ).split()
snake_case_ = Counter(_A ) & Counter(_A )
snake_case_ = sum(common.values() )
if num_same == 0:
return 0
snake_case_ = 1.0 * num_same / len(_A )
snake_case_ = 1.0 * num_same / len(_A )
snake_case_ = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
return normalize_answer(_A ) == normalize_answer(_A )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
assert len(_A ) == len(_A )
snake_case_ = 0
for hypo, pred in zip(_A , _A ):
em += exact_match_score(_A , _A )
if len(_A ) > 0:
em /= len(_A )
return {"em": em}
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
snake_case_ = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param; they have `dropout_rate` instead
snake_case_ = "dropout_rate"
for p in extra_params:
if getattr(_A , _A , _A ):
if not hasattr(_A , _A ) and not hasattr(_A , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(_A ) )
delattr(_A , _A )
continue
snake_case_ = p if hasattr(_A , _A ) else equivalent_param[p]
setattr(_A , _A , getattr(_A , _A ) )
delattr(_A , _A )
return hparams, config
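The SQuAD-style metrics above normalize both strings (lowercase, strip punctuation, drop articles, collapse whitespace) and then score the token-bag overlap as F1. A condensed, self-contained version of that pipeline:

```python
import re
import string
from collections import Counter

def normalize(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def f1(pred, gold):
    p, g = normalize(pred).split(), normalize(gold).split()
    common = sum((Counter(p) & Counter(g)).values())
    if common == 0:
        return 0.0
    precision, recall = common / len(p), common / len(g)
    return 2 * precision * recall / (precision + recall)

print(f1("The cat sat on a mat.", "cat sat on the mat!"))  # 1.0
```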
| 187 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ = None, ) -> Tuple:
__UpperCAmelCase : Optional[int] = {}
if train_file is not None:
__UpperCAmelCase : str = [train_file]
if eval_file is not None:
__UpperCAmelCase : List[Any] = [eval_file]
if test_file is not None:
__UpperCAmelCase : Optional[Any] = [test_file]
__UpperCAmelCase : Optional[int] = datasets.load_dataset("csv", data_files=snake_case__ )
__UpperCAmelCase : Tuple = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase : str = features_name.pop(snake_case__ )
__UpperCAmelCase : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase : Union[str, Any] = {label: i for i, label in enumerate(snake_case__ )}
__UpperCAmelCase : List[str] = tokenizer.model_input_names
__UpperCAmelCase : Any = {}
if len(snake_case__ ) == 1:
for k in files.keys():
__UpperCAmelCase : Optional[int] = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=snake_case__, max_length=snake_case__, padding="max_length" ), batched=snake_case__, )
elif len(snake_case__ ) == 2:
for k in files.keys():
__UpperCAmelCase : str = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=snake_case__, max_length=snake_case__, padding="max_length", ), batched=snake_case__, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase : List[str] = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase : int = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : str = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase : Any = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase : List[str] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase : Tuple = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase : List[str] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase : List[Any] = (
tf.data.Dataset.from_generator(
snake_case__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase : int = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_snake_case = logging.getLogger(__name__)
@dataclass
class _snake_case :
lowerCamelCase__: int = field(metadata={"help": "Which column contains the label"} )
lowerCamelCase__: str = field(default=_lowercase , metadata={"help": "The path of the training file"} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The path of the development file"} )
lowerCamelCase__: Optional[str] = field(default=_lowercase , metadata={"help": "The path of the test file"} )
lowerCamelCase__: int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__: bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _snake_case :
lowerCamelCase__: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase__: bool = field(default=_lowercase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase__: Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def _UpperCamelCase ( ) -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=snake_case__, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
__UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(snake_case__ ), labelaid=snake_case__, idalabel={id: label for label, id in labelaid.items()}, finetuning_task="text-classification", cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
__UpperCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool(".bin" in model_args.model_name_or_path ), config=snake_case__, cache_dir=model_args.cache_dir, )
def compute_metrics(snake_case__ ) -> Dict:
__UpperCAmelCase : str = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase : Dict = TFTrainer(
model=snake_case__, args=snake_case__, train_dataset=snake_case__, eval_dataset=snake_case__, compute_metrics=snake_case__, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase : List[Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : Optional[Any] = trainer.evaluate()
__UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir, "eval_results.txt" )
with open(snake_case__, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(snake_case__ )
return results
if __name__ == "__main__":
main()
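Each split above is wrapped via `tf.data.Dataset.from_generator`, with examples shaped as (dict of model inputs, integer label). A standalone sketch of the same pattern, using the newer `output_signature` argument rather than the dtype/shape tuples in the script:

```python
import tensorflow as tf

def gen():
    # each example: ({model input name: token ids}, integer label id)
    yield {"input_ids": [101, 2307, 102]}, 1
    yield {"input_ids": [101, 3532, 102]}, 0

ds = tf.data.Dataset.from_generator(
    gen,
    output_signature=(
        {"input_ids": tf.TensorSpec(shape=(None,), dtype=tf.int32)},
        tf.TensorSpec(shape=(), dtype=tf.int32),
    ),
)
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))
```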
| 342 | import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Optional[Any] , __lowerCamelCase: Optional[int] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = hidden_states.shape
__UpperCAmelCase : Dict = jax.image.resize(
__lowerCamelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
__UpperCAmelCase : Dict = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
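The upsample block doubles spatial resolution with `jax.image.resize` in NHWC layout before its 3x3 convolution. The resize call in isolation:

```python
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 3))  # (batch, height, width, channels), as in the block
up = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
print(up.shape)  # (1, 16, 16, 3)
```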
| 342 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {}
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
a_ ="""llama"""
a_ =["""past_key_values"""]
def __init__( self : Union[str, Any] , _a : Optional[int]=3_2000 , _a : Union[str, Any]=4096 , _a : Optional[Any]=1_1008 , _a : List[str]=32 , _a : Optional[Any]=32 , _a : Union[str, Any]=None , _a : Dict="silu" , _a : Optional[int]=2048 , _a : Optional[int]=0.02 , _a : Tuple=1e-6 , _a : Optional[Any]=True , _a : Optional[int]=0 , _a : Optional[int]=1 , _a : List[str]=2 , _a : Optional[int]=1 , _a : Optional[Any]=False , _a : int=None , **_a : Dict , ) -> Optional[int]:
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : int = max_position_embeddings
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : Optional[int] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : int = num_key_value_heads
__lowerCamelCase : str = hidden_act
__lowerCamelCase : str = initializer_range
__lowerCamelCase : List[Any] = rms_norm_eps
__lowerCamelCase : List[str] = pretraining_tp
__lowerCamelCase : str = use_cache
__lowerCamelCase : int = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , )
def _lowercase ( self : int ) -> Tuple:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f'got {self.rope_scaling}' )
__lowerCamelCase : List[str] = self.rope_scaling.get('type' , _a )
__lowerCamelCase : int = self.rope_scaling.get('factor' , _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
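Concretely, the validator accepts exactly two keys: a `type` in {'linear', 'dynamic'} and a float `factor` greater than 1. A configuration that passes all of its checks:

```python
rope_scaling = {"type": "linear", "factor": 2.0}

assert isinstance(rope_scaling, dict) and len(rope_scaling) == 2
assert rope_scaling.get("type") in ("linear", "dynamic")
factor = rope_scaling.get("factor")
assert isinstance(factor, float) and factor > 1.0
print("valid rope_scaling:", rope_scaling)
```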
| 208 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Dict:
__lowerCamelCase : Any = checkpoint
__lowerCamelCase : List[str] = {}
__lowerCamelCase : List[str] = vae_state_dict['encoder.conv_in.weight']
__lowerCamelCase : Dict = vae_state_dict['encoder.conv_in.bias']
__lowerCamelCase : Optional[Any] = vae_state_dict['encoder.conv_out.weight']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.conv_out.bias']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.weight']
__lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.bias']
__lowerCamelCase : Dict = vae_state_dict['decoder.conv_in.weight']
__lowerCamelCase : List[str] = vae_state_dict['decoder.conv_in.bias']
__lowerCamelCase : Tuple = vae_state_dict['decoder.conv_out.weight']
__lowerCamelCase : List[str] = vae_state_dict['decoder.conv_out.bias']
__lowerCamelCase : Dict = vae_state_dict['decoder.norm_out.weight']
__lowerCamelCase : Union[str, Any] = vae_state_dict['decoder.norm_out.bias']
__lowerCamelCase : Any = vae_state_dict['quant_conv.weight']
__lowerCamelCase : Union[str, Any] = vae_state_dict['quant_conv.bias']
__lowerCamelCase : Tuple = vae_state_dict['post_quant_conv.weight']
__lowerCamelCase : List[Any] = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
__lowerCamelCase : str = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
__lowerCamelCase : int = {
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
__lowerCamelCase : Dict = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
__lowerCamelCase : Dict = {
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(_lowerCAmelCase )
}
for i in range(_lowerCAmelCase ):
__lowerCamelCase : List[str] = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
__lowerCamelCase : Any = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
__lowerCamelCase : Any = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
__lowerCamelCase : Optional[int] = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : List[Any] = [key for key in vae_state_dict if 'encoder.mid.block' in key]
__lowerCamelCase : str = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
__lowerCamelCase : int = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
__lowerCamelCase : Optional[Any] = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Dict = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
__lowerCamelCase : int = renew_vae_attention_paths(_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
for i in range(_lowerCAmelCase ):
__lowerCamelCase : Optional[Any] = num_up_blocks - 1 - i
__lowerCamelCase : Any = [
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
__lowerCamelCase : Optional[int] = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
__lowerCamelCase : Optional[Any] = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
__lowerCamelCase : Tuple = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = [key for key in vae_state_dict if 'decoder.mid.block' in key]
__lowerCamelCase : Union[str, Any] = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
__lowerCamelCase : List[str] = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
__lowerCamelCase : str = renew_vae_resnet_paths(_lowerCAmelCase )
__lowerCamelCase : Any = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
__lowerCamelCase : Any = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
__lowerCamelCase : List[str] = renew_vae_attention_paths(_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,additional_replacements=[meta_path] ,config=_lowerCAmelCase )
conv_attn_to_linear(_lowerCAmelCase )
return new_checkpoint
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,) -> List[Any]:
# Only support V1
__lowerCamelCase : Any = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
__lowerCamelCase : str = io.BytesIO(r.content )
__lowerCamelCase : Any = OmegaConf.load(_lowerCAmelCase )
__lowerCamelCase : List[Any] = 512
__lowerCamelCase : Dict = 'cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
__lowerCamelCase : Optional[Any] = {}
with safe_open(_lowerCAmelCase ,framework='pt' ,device='cpu' ) as f:
for key in f.keys():
__lowerCamelCase : int = f.get_tensor(_lowerCAmelCase )
else:
__lowerCamelCase : int = torch.load(_lowerCAmelCase ,map_location=_lowerCAmelCase )['state_dict']
# Convert the VAE model.
__lowerCamelCase : int = create_vae_diffusers_config(_lowerCAmelCase ,image_size=_lowerCAmelCase )
__lowerCamelCase : int = custom_convert_ldm_vae_checkpoint(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : Any = AutoencoderKL(**_lowerCAmelCase )
vae.load_state_dict(_lowerCAmelCase )
vae.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
_UpperCamelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
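At its core the checkpoint conversion is a bulk rename of state-dict keys from the LDM layout to the diffusers layout (plus reshaping attention convolutions into linears). The resnet meta path used above, applied to one hypothetical key:

```python
# e.g. the {"old": "down.1.block", "new": "down_blocks.1.resnets"} meta path
old_key = "encoder.down.1.block.0.norm1.weight"
new_key = old_key.replace("down.1.block", "down_blocks.1.resnets")
print(new_key)  # encoder.down_blocks.1.resnets.0.norm1.weight
```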
| 208 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=1_0 ) -> Tuple:
A: List[Any] = []
for _ in range(__lowercase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=1_0 ) -> Any:
A: int = []
for step in range(__lowercase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A: Any = os.path.join(__lowercase , '''schedule.bin''' )
torch.save(scheduler.state_dict() , __lowercase )
A: Union[str, Any] = torch.load(__lowercase )
scheduler.load_state_dict(__lowercase )
return lrs
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A: Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE_ )
A: str = torch.tensor([0.4, 0.2, -0.5] )
A: List[str] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A: int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(1_00 ):
A: int = criterion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def _snake_case ( self : int ) -> Dict:
'''simple docstring'''
A: List[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE_ )
A: List[str] = torch.tensor([0.4, 0.2, -0.5] )
A: int = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A: List[Any] = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE_ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE_ , scale_parameter=SCREAMING_SNAKE_CASE_ , warmup_init=SCREAMING_SNAKE_CASE_ , )
for _ in range(10_00 ):
A: Dict = criterion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCamelCase_ : List[str] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
UpperCamelCase_ : List[str] = 10
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> Dict:
'''simple docstring'''
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ , msg=SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
A: Optional[int] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A , A: Optional[Any] = data
A: Any = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A: int = unwrap_schedule(SCREAMING_SNAKE_CASE_ , self.num_steps )
self.assertListAlmostEqual(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
A: Union[str, Any] = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE_ ) # wrap to test picklability of the schedule
A: Any = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE_ , self.num_steps )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , msg=f"""failed for {scheduler_func} in save and reload""" )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = fn
def __call__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return self.fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@classmethod
def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict:
'''simple docstring'''
A: int = list(map(self , scheduler.lr_lambdas ) )
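The expected learning-rate lists are straightforward to reproduce by hand; for instance, the linear-warmup/linear-decay schedule with base lr 10.0, 2 warmup steps and 10 training steps is:

```python
def linear_schedule(step, warmup=2, total=10, base_lr=10.0):
    # linear warmup to base_lr, then linear decay to zero
    if step < warmup:
        return base_lr * step / warmup
    return base_lr * max(0.0, (total - step) / (total - warmup))

print([round(linear_schedule(s), 2) for s in range(10)])
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]
```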
| 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
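The lazy module defers the real imports until an attribute is first touched, which keeps the package import cheap when optional dependencies are missing. A toy analogue of the mechanism (not the actual `_LazyModule` implementation):

```python
import importlib
import sys
import types

class LazyModule(types.ModuleType):
    """Resolve attributes on first access, then cache them."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):  # only called when normal lookup fails
        module = importlib.import_module(self._class_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ isn't hit again
        return value

sys.modules["lazy_math"] = LazyModule("lazy_math", {"math": ["sqrt"]})
import lazy_math

print(lazy_math.sqrt(9.0))  # 3.0; `math` was only imported at this call
```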
| 334 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        # `sql` and `con` are handled by this writer itself, not forwarded to pandas.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
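# A minimal usage sketch (names illustrative, not part of this module's public API):
# writer = SqlDatasetWriter(dataset, name="my_table", con="sqlite:///data.db")
# written = writer.write()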
| 75 |
import functools
def min_edit_distance(word1: str, word2: str) -> int:
    """Levenshtein edit distance via memoized recursion over the two word indices."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word index overflows - delete all remaining from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word index overflows - delete all remaining from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
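# Sanity check (not part of the original module): the classic pair
# "kitten" -> "sitting" needs exactly 3 edits.
# assert min_edit_distance("kitten", "sitting") == 3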
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit=150_0000):
    """Count perimeters up to the limit formed by exactly one integer right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
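# Euclid's formula generates each primitive triple from coprime m > n of opposite
# parity, with perimeter 2 * m * (m + n); stepping by that perimeter then counts
# every scaled copy of the primitive triple up to the limit.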
if __name__ == "__main__":
print(F"{solution() = }")
| 361 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; shorter words pass through."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 195 | 0 |
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
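# e.g. p=7: 2**7 - 1 = 127 is prime, so the test returns True;
# p=11: 2**11 - 1 = 2047 = 23 * 89, so it returns False (matching the demo below).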
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 287 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance using NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
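# Both implementations agree on simple cases, e.g.
# euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0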
if __name__ == "__main__":
    def benchmark() -> None:
        """Time both implementations on a small input."""
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
| 370 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_owlvit'''] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 16 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1_024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 101 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str) -> str:
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: "tf.Session"):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_tensor = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_tensor, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
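# Example invocation (paths are illustrative, not taken from this file):
# python convert_script.py --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt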
| 101 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64,
        n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0,
        layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256,
        eos_token_id=50_256, tie_word_embeddings=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 356 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 231 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"
class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = ("DownEncoderBlock2D",) , lowerCamelCase = ("UpDecoderBlock2D",) , lowerCamelCase = (64,) , lowerCamelCase = 1 , lowerCamelCase = "silu" , lowerCamelCase = 4 , lowerCamelCase = 32 , lowerCamelCase = 32 , lowerCamelCase = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , down_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , act_fn=lowerCamelCase , norm_num_groups=lowerCamelCase , double_z=lowerCamelCase , )
# pass init params to Decoder
__a = Decoder(
in_channels=lowerCamelCase , out_channels=lowerCamelCase , up_block_types=lowerCamelCase , block_out_channels=lowerCamelCase , layers_per_block=lowerCamelCase , norm_num_groups=lowerCamelCase , act_fn=lowerCamelCase , )
__a = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__a = nn.Convad(lowerCamelCase , lowerCamelCase , 1 )
__a = False
__a = False
# only relevant if vae tiling is enabled
__a = self.config.sample_size
__a = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__a = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__a = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling=True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self):
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x, return_dict=True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z, return_dict=True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z, return_dict=True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
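    # Note: blend_v / blend_h linearly cross-fade the overlapping rows / columns of
    # two adjacent tiles, which is what hides the seams in the tiled encode/decode
    # paths below.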
    def tiled_encode(self, x, return_dict=True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z, return_dict=True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(self, sample, sample_posterior=False, return_dict=True, generator=None):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 261 | """simple docstring"""
from collections import Counter
from timeit import timeit
def _lowerCamelCase( a = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
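# A string can be rearranged into a palindrome iff at most one distinct character
# occurs an odd number of times, hence the "< 2" on the sum of odd counts above.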
def _lowerCamelCase( a = "" ):
if len(a ) == 0:
return True
__a = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__a = {}
for character in lower_case_input_str:
__a = character_freq_dict.get(a , 0 ) + 1
__a = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _lowerCamelCase( a = "" ):
print("\nFor string = " , a , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(a ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
    check_str = input(
        """Enter string to determine if it can be rearranged as a palindrome or not: """
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 261 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
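    # A minimal usage sketch (hypothetical call, relying on the PipelineTool machinery
    # to run encode -> forward -> decode in order):
    # tool = SpeechToTextTool()
    # text = tool(audio)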
| 295 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2_048
    elif "huge" in model_name:
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2_560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
_snake_case = torch.Size([1, 400] )
_snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
_snake_case = torch.Size([1, 1_408, 1_536] )
_snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_snake_case = torch.Size([1, 174] )
_snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
else:
print('Logits:' , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
print('Logits ok!' )
# verify loss, if applicable
if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
print('Loss ok!' )
if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 295 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_jukebox'] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
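# make_batched normalizes the accepted inputs to a list of videos, where each
# video is a list of frames; e.g. a single valid image becomes [[image]].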
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True,
        crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True,
        image_mean=None, image_std=None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None,
        do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None,
        image_std=None, data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
def __lowercase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
'''simple docstring'''
a__ : Dict = do_resize if do_resize is not None else self.do_resize
a__ : Optional[int] = resample if resample is not None else self.resample
a__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ : str = do_rescale if do_rescale is not None else self.do_rescale
a__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ : Optional[int] = offset if offset is not None else self.offset
a__ : Any = do_normalize if do_normalize is not None else self.do_normalize
a__ : Any = image_mean if image_mean is not None else self.image_mean
a__ : List[str] = image_std if image_std is not None else self.image_std
a__ : Optional[int] = size if size is not None else self.size
a__ : Any = get_size_dict(lowercase , default_to_square=lowercase)
a__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
a__ : Optional[int] = get_size_dict(lowercase , param_name='crop_size')
if not valid_images(lowercase):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
a__ : List[Any] = make_batched(lowercase)
a__ : int = [
[
self._preprocess_image(
image=lowercase , do_resize=lowercase , size=lowercase , resample=lowercase , do_center_crop=lowercase , crop_size=lowercase , do_rescale=lowercase , rescale_factor=lowercase , offset=lowercase , do_normalize=lowercase , image_mean=lowercase , image_std=lowercase , data_format=lowercase , )
for img in video
]
for video in videos
]
a__ : int = {'pixel_values': videos}
return BatchFeature(data=lowercase , tensor_type=lowercase)
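# Illustrative sketch (not part of the original file): `preprocess` above flattens
# every input into a list of videos, applies the same per-frame transforms, and
# re-wraps the result. `make_batched_videos`, `toy_preprocess`, and the shapes
# below are assumptions for illustration, not the class's real API.
import numpy as np

def make_batched_videos(videos):
    if isinstance(videos, np.ndarray) and videos.ndim == 3:  # one frame (H, W, C)
        return [[videos]]
    if isinstance(videos, np.ndarray) and videos.ndim == 4:  # one video (T, H, W, C)
        return [list(videos)]
    return [list(v) for v in videos]  # already a batch of videos

def toy_preprocess(videos, scale=1 / 255.0):
    batched = make_batched_videos(videos)
    # Same nested-comprehension shape as the real preprocess: one transform per frame.
    return {"pixel_values": [[frame.astype(np.float32) * scale for frame in video] for video in batched]}

clip = np.random.randint(0, 256, (8, 224, 224, 3), dtype=np.uint8)  # 8 RGB frames
out = toy_preprocess(clip)
assert len(out["pixel_values"]) == 1 and len(out["pixel_values"][0]) == 8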
| 352 |
import enum
import shutil
import sys
TERMINAL_WIDTH, TERMINAL_HEIGHT = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
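# Usage sketch for the helpers above (assumes a VT100-compatible terminal; the
# color number 32 is the standard ANSI code for green, not defined by this module):
if __name__ == "__main__":
    writeColor("processing...", 32, end="\n")
    move_cursor(1, "UP")  # emits ESC[1A: move the cursor one line up
    clear_line()          # overwrite the old text with spaces
    forceWrite("done\n")
    linebreak()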
| 225 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_full_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
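# A generic slow-vs-fast parity check distilled from the tests above: any pair of
# tokenizers built from the same vocab should agree on ids with and without special
# tokens. `slow_tok` and `fast_tok` are assumed to be already-constructed tokenizers.
def assert_tokenizer_parity(slow_tok, fast_tok, samples):
    for text in samples:
        assert slow_tok.encode(text) == fast_tok.encode(text)
        assert slow_tok.encode(text, add_special_tokens=False) == fast_tok.encode(
            text, add_special_tokens=False
        )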
| 26 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
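    # Worked example (illustrative; coordinates are approximate and the figure
    # depends on the WGS-84 constants above): San Francisco to Yosemite Valley.
    SAN_FRANCISCO = (37.774856, -122.424227)
    YOSEMITE = (37.864742, -119.537521)
    print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")  # ~254,352 meters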
| 26 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
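# Sketch of the token_type_ids layout asserted above: Funnel gives the [CLS]
# slot its own segment id (2) instead of folding it into segment 0.
def funnel_token_type_ids(len_a: int, len_b: int = 0) -> list:
    ids = [2] + [0] * len_a  # [CLS] + sentence A tokens (incl. trailing [SEP])
    if len_b:
        ids += [1] * len_b   # sentence B tokens (incl. trailing [SEP])
    return ids

assert funnel_token_type_ids(5) == [2, 0, 0, 0, 0, 0]
assert funnel_token_type_ids(2, 2) == [2, 0, 0, 1, 1]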
| 360 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 233 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 85 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one of `text`, `query_images`, or `images`. All three cannot be None.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
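# Hypothetical usage of the processor above for text-conditioned detection; the
# checkpoint name and query strings are illustrative, not prescribed by this file.
if __name__ == "__main__":
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (640, 480))
    inputs = processor(
        text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    # Ragged query lists are padded to the longest list, so input_ids is rectangular.
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)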
| 166 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
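# Minimal illustration of the lazy-import pattern used above (a simplified
# sketch, not the transformers implementation): attribute access triggers the
# real import and caches the result so later lookups are plain attribute reads.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_submodule:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._name_to_submodule[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is only hit once per name
        return value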
| 126 | """simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
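    # Worked examples for the `mean` helper above:
    assert mean([3, 6, 9]) == 6.0
    assert mean([-11, 2, 3]) == -2.0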
| 126 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
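# The integration tests above pin a 3x3 "golden slice" of the model output
# rather than the full tensor. A generic helper in that style (a sketch; the
# slicing convention and tolerance handling mirror the tests, nothing more):
def assert_close_slice(output, expected_3x3, atol):
    got = numpy.asarray(output)[:, :3, :3]
    if not numpy.allclose(got, expected_3x3, atol=atol):
        raise AssertionError(
            f"max abs diff {numpy.abs(got - expected_3x3).max():.3e} exceeds atol={atol}")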
| 125 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
snake_case_ : List[str] = logging.getLogger(__name__)
@dataclass
class __a :
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__a : int = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the training data."} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the validation data."} )
__a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "A csv or a json file containing the test data."} )
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__a : str = field(
default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCamelCase_ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
UpperCAmelCase_ : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ : List[Any] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
UpperCAmelCase_ : List[Any] = load_dataset('''csv''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCAmelCase_ : int = load_dataset('''json''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
UpperCAmelCase_ : Optional[Any] = raw_datasets['''train'''].features['''label'''].names
UpperCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
UpperCAmelCase_ : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[int] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase_ : Dict = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
UpperCAmelCase_ : Tuple = {'''Refused''': 0, '''Entailed''': 1}
UpperCAmelCase_ : Tuple = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase_ : int = min(data_args.max_seq_length, tokenizer.model_max_length )
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
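    # Worked example of the `#`-delimited table format consumed above (an
    # illustrative snippet; the header row becomes the DataFrame columns):
    #
    #   table_text = "rank#name\n1#alice\n2#bob\n"
    #   rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    #   pd.DataFrame.from_records(rows[1:], columns=rows[0])
    #   =>   rank   name
    #      0    1  alice
    #      1    2    bob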
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
UpperCAmelCase_ : List[str] = raw_datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on dataset''', )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase_ : Any = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Dict = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase_ : str = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
UpperCAmelCase_ : Dict = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
UpperCAmelCase_ : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ), 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[Any] = default_data_collator
    elif training_args.fp16:
UpperCAmelCase_ : str = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 )
else:
UpperCAmelCase_ : List[Any] = None
# Initialize our Trainer
UpperCAmelCase_ : int = Trainer(
model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, )
# Training
if training_args.do_train:
UpperCAmelCase_ : Dict = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Optional[int] = last_checkpoint
UpperCAmelCase_ : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = train_result.metrics
UpperCAmelCase_ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
UpperCAmelCase_ : List[Any] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
UpperCAmelCase_ : Optional[int] = predict_dataset.remove_columns('''label''' )
UpperCAmelCase_ : Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE__, metric_key_prefix='''predict''' ).predictions
UpperCAmelCase_ : Any = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 )
UpperCAmelCase_ : int = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Dict = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
UpperCAmelCase_ : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 125 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self
    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism, but we
        # accumulate items until the extra `is_last` flag signals a chunk boundary.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        '''simple docstring'''
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        '''simple docstring'''
        return len(self.dataset)

    def __getitem__(self, i):
        '''simple docstring'''
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 114 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_efficientnet": [
"EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientNetConfig",
"EfficientNetOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientNetForImageClassification",
"EfficientNetModel",
"EfficientNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
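

# A toy sketch of the lazy-import idea used above (illustrative only; the real
# `_LazyModule` in `transformers.utils` is more involved). Resolving attributes
# on first access keeps importing the package itself cheap.
import importlib


class TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails, i.e. on first access
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)


_lazy = TinyLazyModule({"sqrt": "math"})
assert _lazy.sqrt(9) == 3.0  # `math` is only imported at this point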
| 114 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str =["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str =["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 128 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 0
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(snake_case__ ) , 0 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
# Check that tokenizer_type ≠ model_type
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , config=snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(snake_case__ , "vocab.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="bert" )
self.assertIsInstance(snake_case__ , snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(snake_case__ , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(snake_case__ , "merges.txt" ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , tokenizer_type="gpt2" )
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
with pytest.raises(snake_case__ ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase_ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
if isinstance(snake_case__ , snake_case__ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , snake_case__ )
else:
self.assertEqual(tokenizer.do_lower_case , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
snake_case__ , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
UpperCamelCase_ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TOKENIZER_MAPPING.values()
UpperCamelCase_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=snake_case__ ) , snake_case__ )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , snake_case__ )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=snake_case__ )
UpperCamelCase_ = "Hello, world. How are you?"
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
UpperCamelCase_ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=snake_case__ )
UpperCamelCase_ = tokenizer.tokenize(snake_case__ )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(snake_case__ ) , snake_case__ )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 3_0000 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = get_tokenizer_config("bert-base-cased" )
UpperCamelCase_ = config.pop("_commit_hash" , snake_case__ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(snake_case__ , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
self.assertDictEqual(snake_case__ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = get_tokenizer_config(snake_case__ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
UpperCamelCase_ = CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
try:
AutoConfig.register("custom" , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
            # We pass through a BertTokenizerFast here because there is no slow-to-fast converter for our new
            # tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def _lowerCamelCase ( self ):
'''simple docstring'''
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = False
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = NewTokenizer
lowercase__ = False
try:
AutoConfig.register("custom" , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# If remote code is not set, the default is to use local
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase_ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained("bert-base" )
def _lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase_ = AutoTokenizer.from_pretrained(snake_case__ , revision="aaaaaa" )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCamelCase_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 128 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 249 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["ConvNextFeatureExtractor"]
_lowerCamelCase : Optional[Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 249 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text: str, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text: str, options=[], convert_value=None, default_choice: int = 0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class _CustomHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_usage(self, usage, actions, groups, prefix):
        """simple docstring"""
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
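

# Quick illustration of the converters above (safe to run, no interactive input;
# the expected values assume accelerate's standard enum definitions):
assert _convert_yes_no_to_bool("YES") is True
assert _convert_compute_environment("0") == ComputeEnvironment.LOCAL_MACHINE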
| 125 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class __a :
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__a : Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
__a : int = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
__a : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the training data."} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "A csv or a json file containing the validation data."} )
__a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "A csv or a json file containing the test data."} )
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
UpperCAmelCase_ : Dict = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
UpperCAmelCase_ : Union[str, Any] = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__a : str = field(
default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__a : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__a : bool = field(
default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__a : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__a : bool = field(
default=lowerCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def lowerCamelCase_ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
UpperCAmelCase_ : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ : List[Any] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
UpperCAmelCase_ : Dict = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
UpperCAmelCase_ : Dict = data_args.train_file.split('''.''' )[-1]
UpperCAmelCase_ : Union[str, Any] = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
UpperCAmelCase_ : int = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
UpperCAmelCase_ : List[Any] = load_dataset('''csv''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCAmelCase_ : int = load_dataset('''json''', data_files=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
UpperCAmelCase_ : Optional[Any] = raw_datasets['''train'''].features['''label'''].names
UpperCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
UpperCAmelCase_ : str = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=SCREAMING_SNAKE_CASE__, )
UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[int] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase_ : Dict = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
UpperCAmelCase_ : Tuple = {'''Refused''': 0, '''Entailed''': 1}
UpperCAmelCase_ : Tuple = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase_ : int = min(data_args.max_seq_length, tokenizer.model_max_length )
def preprocess_tabfact_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd
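        # e.g. _convert_table_text_to_pandas("name#age\nalice#30\nbob#25") returns a
        # DataFrame with columns ["name", "age"] and rows ["alice", "30"], ["bob", "25"]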
UpperCAmelCase_ : Optional[Any] = examples['''statement''']
UpperCAmelCase_ : Union[str, Any] = list(map(_convert_table_text_to_pandas, examples['''table_text'''] ) )
UpperCAmelCase_ : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, padding=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, truncation=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : List[Any] = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
UpperCAmelCase_ : List[str] = raw_datasets.map(
SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on dataset''', )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
UpperCAmelCase_ : Any = raw_datasets['''train''']
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Dict = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
UpperCAmelCase_ : str = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
UpperCAmelCase_ : Dict = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
UpperCAmelCase_ : List[str] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ), 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names to floats.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[Any] = default_data_collator
elif training_args.fpaa:
UpperCAmelCase_ : str = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 )
else:
UpperCAmelCase_ : List[Any] = None
# Initialize our Trainer
UpperCAmelCase_ : int = Trainer(
model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, )
# Training
if training_args.do_train:
UpperCAmelCase_ : Dict = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Optional[int] = last_checkpoint
UpperCAmelCase_ : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Any = train_result.metrics
UpperCAmelCase_ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ )
)
UpperCAmelCase_ : List[Any] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''train''', SCREAMING_SNAKE_CASE__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ : Tuple = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) )
trainer.log_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''eval''', SCREAMING_SNAKE_CASE__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and the Trainer won't like that.
UpperCAmelCase_ : Optional[int] = predict_dataset.remove_columns('''label''' )
UpperCAmelCase_ : Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE__, metric_key_prefix='''predict''' ).predictions
UpperCAmelCase_ : Any = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 )
UpperCAmelCase_ : int = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase_ : Dict = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
UpperCAmelCase_ : Optional[int] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
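
# Hypothetical invocation (script and argument names are for illustration only):
#   python run_tabfact_with_tapex.py --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval --output_dir ./tapex-tabfact-out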
| 125 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5]) | 303 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 | 1 |
def stooge_sort(arr):
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
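# Quick illustrative check of the sort defined above (not in the original file):
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]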
| 345 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''')] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'''train''': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
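# Illustrative round-trip (relies on the same private helpers exercised above;
# not part of the original test file):
sd = SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(sd._to_yaml_list())
assert reloaded['''train'''].num_examples == 42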
| 182 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
a__: Tuple = datasets.logging.get_logger(__name__)
a__: Optional[int] = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
a__: Tuple = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
a__: Optional[int] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'''conll_score''': conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#'''):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ),codebase_urls=['''https://github.com/ns-moosavi/coval'''],reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
],)
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        all_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=all_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span)
        return score
| 39 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: List[Any] = logging.get_logger(__name__)
a__: Optional[Any] = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = '''unispeech'''
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1E-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul,self.conv_stride,1 )
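# Small illustration (not in the original file): with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples raw audio by
# 5 * 2**6 = 320 samples per logit, which is exactly what the property above computes.
from functools import reduce
from operator import mul

assert reduce(mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320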
| 39 | 1 |
def z_function(input_str):
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i, z_result, s):
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern, input_str):
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
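# Illustrative use of the helpers above (not in the original file):
# "abr" occurs twice in "abracadabra".
assert find_pattern("abr", "abracadabra") == 2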
| 248 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case : Dict = logging.get_logger(__name__)
__snake_case : Any = {"""vocab_file""": """sentencepiece.bpe.model"""}
__snake_case : Union[str, Any] = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
__snake_case : Tuple = {
"""moussaKam/mbarthez""": 10_24,
"""moussaKam/barthez""": 10_24,
"""moussaKam/barthez-orangesum-title""": 10_24,
}
__snake_case : int = """▁"""
class BarthezTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids['''<mask>'''] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : Optional[Any] = [self.cls_token_id]
a_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
if token_ids_a is None:
return [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> List[int]:
a_ : Optional[Any] = [self.sep_token_id]
a_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCamelCase__ ( self ) -> List[Any]:
return len(self.sp_model )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : List[Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , _lowercase ) -> List[str]:
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Optional[int] = self.sp_model.PieceToId(_lowercase )
return spm_id if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_lowercase )
def UpperCamelCase__ ( self , _lowercase ) -> Any:
a_ : Dict = []
a_ : Union[str, Any] = """"""
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowercase ) + token
a_ : Dict = True
a_ : int = []
else:
current_sub_tokens.append(_lowercase )
a_ : Union[str, Any] = False
out_string += self.sp_model.decode(_lowercase )
return out_string.strip()
def __getstate__( self ) -> Optional[int]:
a_ : Any = self.__dict__.copy()
a_ : int = None
return state
def __setstate__( self , _lowercase ) -> Union[str, Any]:
a_ : List[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a_ : str = {}
a_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self , _lowercase , _lowercase = None ) -> Tuple[str]:
if not os.path.isdir(_lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a_ : Dict = os.path.join(
_lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowercase , """wb""" ) as fi:
a_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (out_vocab_file,)
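# Hedged usage sketch (not part of the file above; requires sentencepiece and
# downloads the checkpoint named in the pretrained map defined earlier):
tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
print(tokenizer.tokenize("Bonjour le monde"))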
| 248 | 1 |
import string
import numpy
def greatest_common_divisor(a, b):
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str):
        return self.key_string.index(letter)

    def replace_digits(self, num: int):
        return self.key_string[round(num)]

    def check_determinant(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str):
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str):
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str):
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main():
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
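# Illustrative round-trip (key values chosen for this sketch; determinant 7 is
# coprime with 36, as check_determinant above requires). Input length is a
# multiple of the key order, so no padding is added.
cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
assert cipher.decrypt(cipher.encrypt("frog")) == "FROG"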
| 357 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def _lowerCamelCase ( self : Union[str, Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def _lowerCamelCase ( self : str ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def _lowerCamelCase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def _lowerCamelCase ( self : Optional[Any] ):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 42 | 0 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        tense = 'will be' if year > datetime.now().year else 'was'
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
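# Quick sanity check of the function above (not in the original file):
# Gauss's method places Easter 2023 on April 9.
assert gauss_easter(2023) == datetime(2023, 4, 9)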
| 332 | from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr, low, high) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr, low, mid, high) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
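# Illustrative call of the divide-and-conquer routine above on a classic input
# (not in the original file): the best subarray of
# [-2, 1, -3, 4, -1, 2, 1, -5, 4] is arr[3:7] with sum 6.
print(max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8))  # (3, 6, 6)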
| 157 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
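# Hedged usage sketch (not part of the file above; the Hub checkpoint name is
# an assumption for this example and the call downloads model files):
from transformers import BridgeTowerProcessor
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
inputs = processor(images=Image.new("RGB", (224, 224)), text="a photo", return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values, pixel_mask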
| 98 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def snake_case__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
__magic_name__ = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__magic_name__ = CLIPTextModel(a__ )
__magic_name__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__magic_name__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Any , a__ : Optional[int] , a__ : List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__magic_name__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((64, 64) )
__magic_name__ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(a__ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(a__ )
else:
__magic_name__ = torch.Generator(device=a__ ).manual_seed(a__ )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = StableDiffusionInpaintPipeline(**a__ )
__magic_name__ = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = self.get_dummy_inputs(a__ )
__magic_name__ = sd_pipe(**a__ ).images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
def snake_case__ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Tuple ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def snake_case__ ( self : List[str] ):
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case__ ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__magic_name__ = '''stabilityai/stable-diffusion-2-inpainting'''
__magic_name__ = PNDMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
__magic_name__ = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__magic_name__ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type='''np''' , )
__magic_name__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 98 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """embed_dim"""))
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """num_heads"""))
class TFCvtModelTester:
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=6_4 , lowerCAmelCase__=3 , lowerCAmelCase__=[1_6, 4_8, 9_6] , lowerCAmelCase__=[1, 3, 6] , lowerCAmelCase__=[1, 2, 1_0] , lowerCAmelCase__=[7, 3, 3] , lowerCAmelCase__=[4, 2, 2] , lowerCAmelCase__=[2, 1, 1] , lowerCAmelCase__=[2, 2, 2] , lowerCAmelCase__=[False, False, True] , lowerCAmelCase__=[0.0, 0.0, 0.0] , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=2 , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_sizes
__SCREAMING_SNAKE_CASE = patch_stride
__SCREAMING_SNAKE_CASE = patch_padding
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = stride_kv
__SCREAMING_SNAKE_CASE = depth
__SCREAMING_SNAKE_CASE = cls_token
__SCREAMING_SNAKE_CASE = attention_drop_rate
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
# create a random int32 tensor of given shape
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels)
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = TFCvtModel(config=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , training=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for i in range(len(self.depth)):
__SCREAMING_SNAKE_CASE = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
__SCREAMING_SNAKE_CASE = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFCvtForImageClassification(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
def snake_case_ ( self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=3_7)
def snake_case_ ( self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""")
def snake_case_ ( self):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""")
def snake_case_ ( self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case_ ( self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""")) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case_ ( self):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""")
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tf.keras.mixed_precision.Policy("""mixed_float16""")
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""")
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def snake_case_ ( self):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = len(self.model_tester.depth)
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__)
@slow
def snake_case_ ( self):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFCvtModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case_ ( self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCAmelCase__ , return_tensors="""tf""")
# forward pass
__SCREAMING_SNAKE_CASE = model(**lowerCAmelCase__)
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tf.constant([0.92_85, 0.90_15, -0.31_50])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase__ , atol=1E-4))
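# Note (added): the integration test above checks a (1, 1000) logit shape — the 1000
# classes correspond to the ImageNet-1k head — and compares a reference slice with
# atol=1e-4, since TF/PT numerical parity on pretrained weights is only approximate.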
| 100 | from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 338 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase__ ( snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = RoCBertTokenizer
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = filter_non_english
def _UpperCamelCase ( self ):
super().setUp()
UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
UpperCAmelCase = {}
UpperCAmelCase = {}
for i, value in enumerate(A ):
UpperCAmelCase = i
UpperCAmelCase = i
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_shape_file"""] )
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file ,"""w""" ,encoding="""utf-8""" ) as word_shape_writer:
json.dump(A ,A ,ensure_ascii=A )
with open(self.word_pronunciation_file ,"""w""" ,encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(A ,A ,ensure_ascii=A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
UpperCAmelCase = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(A ,["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A ) ,[5, 6, 2, 5, 7, 8] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=A ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase = {}
for i, token in enumerate(A ):
UpperCAmelCase = i
UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=A ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def _UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _UpperCamelCase ( self ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
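# Note (added): a sketch of how these predicates are conventionally implemented in the
# BERT-style tokenizers (assuming the standard logic, not copied from this file):
#   cat = unicodedata.category(char)
#   _is_whitespace : char in " \t\n\r" or cat == "Zs"
#   _is_control    : cat.startswith("C") and char not in "\t\n\r"
#   _is_punctuation: ASCII ranges 33-47, 58-64, 91-96, 123-126, or cat.startswith("P")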
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
def _UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
UpperCAmelCase = tokenizer_r.encode_plus(
A ,return_attention_mask=A ,return_token_type_ids=A ,return_offsets_mapping=A ,add_special_tokens=A ,)
UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(A ,"""do_lower_case""" ) else False
UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def _UpperCamelCase ( self ):
UpperCAmelCase = ["""的""", """人""", """有"""]
UpperCAmelCase = """""".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = True
UpperCAmelCase = self.tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = tokenizer_p.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
UpperCAmelCase = False
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = self.tokenizer_class.from_pretrained(A ,**A )
UpperCAmelCase = tokenizer_r.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_p.encode(A ,add_special_tokens=A )
UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(A )
UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A ,A )
self.assertListEqual(A ,A )
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
UpperCAmelCase = tokenizer.encode("""你好""" ,add_special_tokens=A )
UpperCAmelCase = tokenizer.encode("""你是谁""" ,add_special_tokens=A )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ,A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizers(do_lower_case=A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase = """你好,你是谁"""
UpperCAmelCase = tokenizer.tokenize(A )
UpperCAmelCase = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(A )
UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(A )
UpperCAmelCase = tokenizer.prepare_for_model(
A ,A ,A ,add_special_tokens=A )
UpperCAmelCase = tokenizer.encode_plus(A ,add_special_tokens=A )
self.assertEqual(A ,A )
| 351 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}

    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, projection 0 maps to linear1 and the rest to linear2.
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split the fused qkv tensor into separate query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
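def _demo_qkv_split():
    # Added illustration (hypothetical helper, not part of the original script): CLAP's
    # audio attention stores query/key/value fused along dim 0, so rename_state_dict
    # slices a (3 * dim, ...) tensor into three equal chunks, as shown here on dummy data.
    mixed_qkv = torch.randn(3 * 8, 8)
    qkv_dim = mixed_qkv.size(0) // 3
    query_layer = mixed_qkv[:qkv_dim]
    key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
    value_layer = mixed_qkv[qkv_dim * 2 :]
    assert query_layer.shape == key_layer.shape == value_layer.shape == (8, 8)
    return query_layer, key_layer, value_layer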
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 234 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
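# Usage sketch (added): `attribute_map` aliases the generic config names onto CTRL's own
# attributes, so the canonical accessors resolve transparently, e.g.
#   config = CTRLConfig()
#   config.hidden_size == config.n_embd == 1280  # True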
| 5 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
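# Postprocessing sketch (added; illustrative names only): the step above treats VQA as
# multi-label classification — logits go through a sigmoid rather than a softmax, then
# the top-k answers are read off:
#   probs = model_outputs.logits.sigmoid()[0]
#   scores, ids = probs.topk(top_k)
#   [{"score": s, "answer": id2label[i]} for s, i in zip(scores.tolist(), ids.tolist())]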
| 29 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class a_ ( lowerCamelCase ):
lowercase = """ibert"""
def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="none" , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = quant_mode
UpperCamelCase = force_dequant
class a_ ( lowerCamelCase ):
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
    dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
    dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 183 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is set.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False

    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
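# Usage sketch (added): the helper above returns True only when the SageMaker launcher
# exported model-parallel options AND `smdistributed` is importable, e.g.
#   os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
#   os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
#   is_sagemaker_model_parallel_available()  # still False without `smdistributed` installed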
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a_ ( lowerCamelCase ):
lowercase = field(
default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , _SCREAMING_SNAKE_CASE , )
@cached_property
def A__ ( self ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
UpperCamelCase = torch.device("""cpu""" )
UpperCamelCase = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase = smp.local_rank()
UpperCamelCase = torch.device("""cuda""" , _SCREAMING_SNAKE_CASE )
UpperCamelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
UpperCamelCase = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
UpperCamelCase = torch.device("""cuda""" , self.local_rank )
UpperCamelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
UpperCamelCase = torch.device("""cuda""" , self.local_rank )
UpperCamelCase = 1
if device.type == "cuda":
torch.cuda.set_device(_SCREAMING_SNAKE_CASE )
return device
@property
def A__ ( self ) -> Tuple:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def A__ ( self ) -> str:
"""simple docstring"""
return False
| 183 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : Optional[Any] = SamImageProcessor()
lowerCAmelCase_ : str = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : Dict = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Dict = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Dict = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
lowerCAmelCase_ : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Any = self.get_image_processor()
lowerCAmelCase_ : List[str] = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Union[str, Any] = image_processor(__lowercase , return_tensors='''np''' )
lowerCAmelCase_ : List[str] = processor(images=__lowercase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[int] = self.get_image_processor()
lowerCAmelCase_ : int = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Optional[int] = [torch.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : Optional[int] = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : Tuple = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : str = processor.post_process_masks(__lowercase , __lowercase , __lowercase )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : Tuple = processor.post_process_masks(
__lowercase , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowerCAmelCase_ : Any = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : str = processor.post_process_masks(__lowercase , np.array(__lowercase ) , np.array(__lowercase ) )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : str = [[1, 0], [0, 1]]
with self.assertRaises(__lowercase ):
lowerCAmelCase_ : List[Any] = processor.post_process_masks(__lowercase , np.array(__lowercase ) , np.array(__lowercase ) )
@require_vision
@require_tf
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : Union[str, Any] = SamImageProcessor()
lowerCAmelCase_ : Any = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> Dict:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : Optional[int] = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : Any = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ : Dict = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 )
lowerCAmelCase_ : Any = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__lowercase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowercase )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Optional[int] = self.get_image_processor()
lowerCAmelCase_ : List[Any] = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Any = self.prepare_image_inputs()
lowerCAmelCase_ : List[Any] = image_processor(__lowercase , return_tensors='''np''' )
lowerCAmelCase_ : List[str] = processor(images=__lowercase , return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[Any] = self.get_image_processor()
lowerCAmelCase_ : Union[str, Any] = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Tuple = [tf.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : str = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : str = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : Dict = processor.post_process_masks(__lowercase , __lowercase , __lowercase , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : Tuple = processor.post_process_masks(
__lowercase , tf.convert_to_tensor(__lowercase ) , tf.convert_to_tensor(__lowercase ) , return_tensors='''tf''' , )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
lowerCAmelCase_ : List[str] = [np.ones((1, 3, 5, 5) )]
lowerCAmelCase_ : Any = processor.post_process_masks(
__lowercase , np.array(__lowercase ) , np.array(__lowercase ) , return_tensors='''tf''' )
self.assertEqual(masks[0].shape , (1, 3, 1_7_6_4, 2_6_4_6) )
lowerCAmelCase_ : Union[str, Any] = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
lowerCAmelCase_ : List[Any] = processor.post_process_masks(
__lowercase , np.array(__lowercase ) , np.array(__lowercase ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = tempfile.mkdtemp()
lowerCAmelCase_ : Any = SamImageProcessor()
lowerCAmelCase_ : Tuple = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def lowercase_ ( self , **__lowercase ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowercase ).image_processor
def lowercase_ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ) -> Any:
lowerCAmelCase_ : Tuple = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCAmelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.get_image_processor()
lowerCAmelCase_ : str = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : Union[str, Any] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
lowerCAmelCase_ : Optional[Any] = [tf.convert_to_tensor(__lowercase )]
lowerCAmelCase_ : Union[str, Any] = [torch.tensor(__lowercase )]
lowerCAmelCase_ : Any = [[1_7_6_4, 2_6_4_6]]
lowerCAmelCase_ : int = [[6_8_3, 1_0_2_4]]
lowerCAmelCase_ : List[Any] = processor.post_process_masks(
__lowercase , __lowercase , __lowercase , return_tensors='''tf''' )
lowerCAmelCase_ : List[Any] = processor.post_process_masks(
__lowercase , __lowercase , __lowercase , return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : Union[str, Any] = self.get_image_processor()
lowerCAmelCase_ : int = SamProcessor(image_processor=__lowercase )
lowerCAmelCase_ : List[Any] = self.prepare_image_inputs()
lowerCAmelCase_ : Optional[Any] = image_processor(__lowercase , return_tensors='''pt''' )['''pixel_values'''].numpy()
lowerCAmelCase_ : List[str] = processor(images=__lowercase , return_tensors='''pt''' )['''pixel_values'''].numpy()
lowerCAmelCase_ : Optional[Any] = image_processor(__lowercase , return_tensors='''tf''' )['''pixel_values'''].numpy()
lowerCAmelCase_ : Tuple = processor(images=__lowercase , return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertTrue(np.allclose(__lowercase , __lowercase ) ) | 262 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_UpperCAmelCase : Union[str, Any] ="""pt"""
elif is_tf_available():
_UpperCAmelCase : List[Any] ="""tf"""
else:
_UpperCAmelCase : Optional[int] ="""jax"""
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PerceiverTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def lowercase_ ( self ) -> Optional[int]:
super().setUp()
lowerCAmelCase_ : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ) -> Any:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def lowercase_ ( self , **__lowercase ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def lowercase_ ( self , __lowercase , __lowercase=False , __lowercase=2_0 , __lowercase=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase_ : Optional[Any] = []
for i in range(len(__lowercase ) ):
try:
lowerCAmelCase_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase_ : List[str] = list(filter(lambda t: re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , toks ) )
lowerCAmelCase_ : Optional[int] = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowercase ) , toks ) )
if max_length is not None and len(__lowercase ) > max_length:
lowerCAmelCase_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(__lowercase ) < min_length and len(__lowercase ) > 0:
while len(__lowercase ) < min_length:
lowerCAmelCase_ : Union[str, Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase_ : int = tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase )
if " " not in output_txt and len(__lowercase ) > 1:
lowerCAmelCase_ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowercase )
)
if with_prefix_space:
lowerCAmelCase_ : Any = ''' ''' + output_txt
lowerCAmelCase_ : List[str] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
return output_txt, output_ids
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : List[str] = self.perceiver_tokenizer
lowerCAmelCase_ : Any = '''Unicode €.'''
lowerCAmelCase_ : Dict = tokenizer(__lowercase )
lowerCAmelCase_ : Any = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : str = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]Unicode €.[SEP]''' )
lowerCAmelCase_ : Optional[int] = tokenizer('''e è é ê ë''' )
lowerCAmelCase_ : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded['''input_ids'''] , __lowercase )
# decoding
lowerCAmelCase_ : int = tokenizer.decode(__lowercase )
self.assertEqual(__lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
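# Note (added): the expected ids above follow Perceiver's byte-level scheme — each UTF-8
# byte is shifted by 6 reserved special-token slots (with [CLS] = 4 and [SEP] = 5), e.g.
#   [b + 6 for b in "Unicode €.".encode("utf-8")]
#   -> [91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52]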
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
lowerCAmelCase_ : Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
lowerCAmelCase_ : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
lowerCAmelCase_ : Optional[int] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
if FRAMEWORK != "jax":
lowerCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowercase , __lowercase )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Union[str, Any]:
lowerCAmelCase_ : int = self.perceiver_tokenizer
lowerCAmelCase_ : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowerCAmelCase_ : List[Any] = tokenizer(__lowercase , padding=__lowercase , return_tensors=__lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , __lowercase )
self.assertIn('''attention_mask''' , __lowercase )
self.assertNotIn('''decoder_input_ids''' , __lowercase )
self.assertNotIn('''decoder_attention_mask''' , __lowercase )
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase_ : int = [
'''Summary of the text.''',
'''Another summary.''',
]
lowerCAmelCase_ : List[str] = tokenizer(
text_target=__lowercase , max_length=3_2 , padding='''max_length''' , truncation=__lowercase , return_tensors=__lowercase )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
def lowercase_ ( self ) -> Optional[Any]:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
lowerCAmelCase_ : str = ''' He is very happy, UNwant\u00E9d,running'''
lowerCAmelCase_ : Optional[int] = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : Any = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Tuple = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
shutil.rmtree(__lowercase )
lowerCAmelCase_ : Optional[int] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase_ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase_ : List[str] = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase_ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase_ : str = tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
tokenizer.save_pretrained(__lowercase )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase )
lowerCAmelCase_ : Optional[Any] = after_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase_ : str = tokenizer.__class__.from_pretrained(__lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowercase )
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Tuple = json.load(__lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase_ : Any = json.load(__lowercase )
lowerCAmelCase_ : Optional[int] = [f"""<extra_id_{i}>""" for i in range(1_2_5 )]
lowerCAmelCase_ : Optional[Any] = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
lowerCAmelCase_ : Any = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(__lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
with open(os.path.join(__lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(__lowercase , __lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase_ : int = tokenizer_class.from_pretrained(
__lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase_ : Tuple = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__lowercase )]
lowerCAmelCase_ : Dict = tokenizer_class.from_pretrained(
__lowercase , additional_special_tokens=__lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : Any = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , '''�''' )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Any:
pass
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> List[str]:
pass
def lowercase_ ( self ) -> Dict:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase_ : Tuple = self.get_tokenizers(fast=__lowercase , do_lower_case=__lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ : List[str] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
lowerCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(__lowercase )
self.assertIsInstance(__lowercase , __lowercase ) | 262 | 1 |
'''simple docstring'''
import unittest
from transformers import DonutProcessor
A : str = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(A)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 227 |
'''simple docstring'''
from __future__ import annotations


class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
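if __name__ == "__main__":
    # Added usage example: the three 8-connected cells form one island, the isolated
    # cell a second one.
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, grid).count_islands())  # -> 2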
| 227 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=0.6 , __UpperCAmelCase=None , ) -> List[str]:
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = mask_ratio
_a = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_a = (image_size // patch_size) ** 2
_a = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _UpperCAmelCase ( self ) -> int:
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Optional[int]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
_a = TFViTMAEModel(config=__UpperCAmelCase )
_a = model(__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = TFViTMAEForPreTraining(__UpperCAmelCase )
_a = model(__UpperCAmelCase , training=__UpperCAmelCase )
# expected sequence length = num_patches
_a = (self.image_size // self.patch_size) ** 2
_a = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_a = 1
_a = TFViTMAEForPreTraining(__UpperCAmelCase )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__UpperCAmelCase , training=__UpperCAmelCase )
_a = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.prepare_config_and_inputs()
((_a) , (_a) , (_a)) = config_and_inputs
_a = {'''pixel_values''': pixel_values}
return config, inputs_dict
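# Standalone sketch (added) of the tester's sequence-length arithmetic: ViTMAE keeps a
# random (1 - mask_ratio) fraction of the patch tokens plus the [CLS] token. With the
# defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
_num_patches = (30 // 2) ** 2  # 225 patch tokens
_expected_seq_length = int(math.ceil((1 - 0.6) * (_num_patches + 1)))  # ceil(0.4 * 226) = 91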
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_determinism(self):
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4) | 320 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : str = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Dict = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Union[str, Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Tuple = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Optional[Any] = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
requires_backends(cls , ['''flax'''] )
class __lowerCamelCase ( metaclass=a__ ):
'''simple docstring'''
A_ : Any = ['flax']
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
requires_backends(cls , ['''flax'''] ) | 320 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 hex characters in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message to a multiple of 512 bits and append its bit length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT within 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Compute the MD5 digest of `message`, returned as little-endian hex bytes."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
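    # Quick sanity check (a minimal added sketch, assuming the function names
    # restored above): md5_me returns the standard hex digest as bytes, so the
    # result should agree with hashlib.
    import hashlib
    assert md5_me(b"hello world") == hashlib.md5(b"hello world").hexdigest().encode("utf-8")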
| 276 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end times and duration (minutes) from a single workflow job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Collect time info for all jobs of a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
| 276 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
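# Minimal usage sketch (added example, not part of the original module):
# constructing the deprecated class still works but emits a FutureWarning
# pointing to CLIPImageProcessor.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)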
| 136 |
"""simple docstring"""
def solution(n: int = 1000000) -> int:
    """Return the starting number below `n` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
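    # Added note: for a limit of one million the longest chain starts at 837799,
    # the published answer to Project Euler problem 14, i.e.
    # solution(1_000_000) == 837799.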
| 136 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
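    # Added note: with the _LazyModule registration above, importing a symbol
    # such as WavLMModel from this package only triggers the heavy torch-backed
    # import on first attribute access.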
| 355 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        pass
    def test_add_noise_device(self):
        pass
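# Minimal usage sketch (added example; the zero "model output" below is only a
# stand-in for a real UNet prediction, not how UnCLIP is actually driven):
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for a model prediction
        sample = scheduler.step(residual, t, sample, generator=torch.manual_seed(0)).prev_sample
    print(sample.abs().mean())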
| 249 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
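    # Added note: this block runs once per rank and is typically launched with
    #   torchrun --nproc_per_node=2 <this file>
    # pad_across_processes right-pads with zeros by default; pad_first=True
    # left-pads instead, which is what the second half above verifies.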
| 4 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its character/token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
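# Example invocation (added sketch; repo names are placeholders):
#   python pretokenizing.py --tokenizer_dir <tokenizer-repo> \
#       --dataset_name <dataset-repo> --tokenized_data_repo <hub-repo>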
| 4 | 1 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo = {}
def next_term(a_i, k, i, n):
    """Advance a_i in place by jumping cached spans; return (difference, terms jumped)."""
    # digit sum of the "upper" digits and the value of the lower k digits
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))
    diff, dn = 0, 0
    max_dn = n - i
    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break
            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(new_c, k, a_i)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo
    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn
    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped
    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Compute sequential terms of the sequence until a carry leaves the lower k digits."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])
    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]
    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break
    if addend > 0:
        add(addend, k, a_i)
    return diff, i - start_i
def add(addend, k, digits):
    """Add `addend` into the digit list starting at position k, propagating carries."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n: int = 10**15) -> int:
    """Return a(n) for the sequence a(k+1) = a(k) + digit_sum(a(k)) with a(1) = 1."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
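def _naive_solution(n: int) -> int:
    # Added cross-check (a minimal brute-force sketch, not part of the original
    # solution): walk the sequence a(k+1) = a(k) + digit_sum(a(k)) directly.
    # For small n this should match the jump-based solution above, e.g.
    # _naive_solution(10) == solution(10) == 62.
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a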
if __name__ == "__main__":
print(f"""{solution() = }""")
| 355 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    # The class name is inferred from the LeViT-style (256/224) shortest-edge resize below.
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
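# Minimal usage sketch (added example; the LevitImageProcessor name restored
# above is an inference, not confirmed by this snippet):
if __name__ == "__main__":
    processor = LevitImageProcessor(size={"shortest_edge": 224})
    dummy_image = np.zeros((256, 320, 3), dtype=np.uint8)
    batch = processor.preprocess(dummy_image)
    print(np.array(batch["pixel_values"]).shape)  # expected: (1, 3, 224, 224)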
| 77 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
        comm_check_on_output(result)
        result = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass
    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )
        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        self.assertTrue(outputs.loss is not None)
| 33 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class snake_case ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''apply_ocr''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {'''height''': 42, '''width''': 42} )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, Image.Image )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
self.assertIsInstance(encoding.words, _lowerCamelCase )
self.assertIsInstance(encoding.boxes, _lowerCamelCase )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, np.ndarray )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
# Initialize image_processing
__A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowerCamelCase, torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase, torch.Tensor )
# Test not batched input
__A = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
# Test batched
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
), )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
# with apply_OCR = True
__A = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A = load_dataset('''hf-internal-testing/fixtures_docvqa''', split='''test''' )
__A = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__A = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _lowerCamelCase )
self.assertListEqual(encoding.boxes, _lowerCamelCase )
# with apply_OCR = False
__A = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
__A = image_processing(_lowerCamelCase, return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24) )
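# Illustrative usage sketch (not part of the test suite): run the processor on a
# generated blank image; apply_ocr=True requires Tesseract/pytesseract at runtime.
if __name__ == "__main__" and is_pytesseract_available():
    _processor = LayoutLMvaImageProcessor(apply_ocr=True )
    _blank = Image.new('''RGB''' , (4_00, 3_00) , color='''white''' )
    _encoding = _processor(_blank , return_tensors='''pt''' )
    print(_encoding.pixel_values.shape )  # torch.Size([1, 3, 224, 224])
    print(len(_encoding.words[0] ) , len(_encoding.boxes[0] ) )  # likely 0 words on a blank page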
| 266 | 0 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 1_1_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
            assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
            assert abs(result_mean.item() - 0.00_02 ) < 1E-3
    def test_full_loop_no_noise( self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
    def test_full_loop_device( self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith('cpu' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1E-2
            assert abs(result_mean.item() - 0.02_66 ) < 1E-3
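# Minimal standalone sketch of the denoising-loop pattern exercised above, using a
# random tensor as a stand-in for a trained model (illustrative only).
if __name__ == "__main__":
    _scheduler = KDPMaDiscreteScheduler(num_train_timesteps=1_0_0_0 )
    _scheduler.set_timesteps(1_0 )
    _sample = torch.randn(1 , 3 , 8 , 8 ) * _scheduler.init_noise_sigma
    for _t in _scheduler.timesteps:
        _model_input = _scheduler.scale_model_input(_sample , _t )
        _noise_pred = torch.randn_like(_model_input )  # stand-in for a model call
        _sample = _scheduler.step(_noise_pred , _t , _sample ).prev_sample
    print(_sample.shape )  # torch.Size([1, 3, 8, 8])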
| 289 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowercase__ : Optional[int] = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowercase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
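# Illustrative note (not part of the module): the _LazyModule above swaps itself into
# sys.modules so that heavy submodules are imported only on first attribute access.
# A minimal sketch of the same idea, shown in comment form with a hypothetical name:
#
#     import importlib
#     class LazyModule:
#         def __init__(self, name):
#             self._name, self._module = name, None
#         def __getattr__(self, attr):
#             if self._module is None:
#                 self._module = importlib.import_module(self._name)  # deferred import
#             return getattr(self._module, attr)
#
#     trocr = LazyModule("transformers.models.trocr")  # nothing imported yet
#     trocr.TrOCRConfig                                # triggers the real import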
| 289 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"summary": Value("string" )} )
    text_column: str = "text"
    summary_column: str = "summary"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
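# Illustrative usage sketch (hypothetical column names): a task template records how
# dataset columns map onto the task's canonical "text"/"summary" schema.
if __name__ == "__main__":
    _template = snake_case(text_column="article" , summary_column="highlights" )
    print(_template.column_mapping )  # {'article': 'text', 'highlights': 'summary'}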
| 98 | '''simple docstring'''
import requests
__a: str = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def fetch_bbc_news( bbc_news_api_key ):
    # fetching a list of articles in json format
    bbc_news_page = requests.get(__a + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 198 | 0 |
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowercase__ : Union[str, Any] = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
lowercase__ : Any = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
lowercase__ : Optional[Any] = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
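# Illustrative usage sketch: load this metric through `datasets` and score two text
# lists; computing features downloads a GPT-2 model at runtime, hence the guard.
if __name__ == "__main__":
    _mauve = datasets.load_metric('''mauve''' )
    _out = _mauve.compute(
        predictions=["hello there", "general kenobi"] , references=["hello there", "general kenobi"] )
    print(_out.mauve )  # 1.0 for identical texts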
| 155 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase__ : List[Any] = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Any = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = ['''CLIPFeatureExtractor''']
lowercase__ : Any = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 155 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def get_dummy_inputs( self , seed=0):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Union[str, Any] = self.get_dummy_inputs()
__UpperCamelCase :Tuple = pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :List[str] = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Any = self.get_dummy_inputs()
__UpperCamelCase :List[Any] = pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :List[Any] = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
# warmup pass to apply optimizations
__UpperCamelCase :str = pipe(**self.get_dummy_inputs())
__UpperCamelCase :Optional[int] = self.get_dummy_inputs()
__UpperCamelCase :Optional[int] = pipe(**__lowercase).images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Any = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :int = self.get_dummy_inputs()
__UpperCamelCase :int = pipe(**__lowercase).images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Union[str, Any] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :int = self.get_dummy_inputs()
__UpperCamelCase :List[str] = pipe(**__lowercase).images
__UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Dict = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def UpperCamelCase__ ( self) -> Optional[int]:
__UpperCamelCase :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''')
__UpperCamelCase :List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = self.get_dummy_inputs()
__UpperCamelCase :Dict = pipe(**__lowercase).images
__UpperCamelCase :Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__UpperCamelCase :Any = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[Any] = ort.SessionOptions()
__UpperCamelCase :str = False
return options
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
__UpperCamelCase :Optional[Any] = init_image.resize((768, 512))
# using the PNDM scheduler by default
__UpperCamelCase :List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Optional[Any] = '''A fantasy landscape, trending on artstation'''
__UpperCamelCase :Any = np.random.RandomState(0)
__UpperCamelCase :int = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowercase , output_type='''np''' , )
__UpperCamelCase :str = output.images
__UpperCamelCase :Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCamelCase :str = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''')
__UpperCamelCase :Tuple = init_image.resize((768, 512))
__UpperCamelCase :Optional[int] = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''')
__UpperCamelCase :Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[Any] = '''A fantasy landscape, trending on artstation'''
__UpperCamelCase :str = np.random.RandomState(0)
__UpperCamelCase :List[str] = pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowercase , output_type='''np''' , )
__UpperCamelCase :Dict = output.images
__UpperCamelCase :Union[str, Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__UpperCamelCase :Dict = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
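# Illustrative end-to-end sketch mirroring the dummy tests above: run the tiny ONNX
# img2img checkpoint on CPU with a random init image (downloads the model on first use).
if __name__ == "__main__":
    _pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
        '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' , provider='''CPUExecutionProvider''' )
    _images = _pipe(
        prompt='''A painting of a squirrel eating a burger''' ,
        image=floats_tensor((1, 3, 128, 128) , rng=random.Random(0 ) ) ,
        generator=np.random.RandomState(0 ) ,
        num_inference_steps=3 ,
        strength=0.75 ,
        output_type='''numpy''' , ).images
    print(_images.shape )  # (1, 128, 128, 3)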
| 43 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate ( *args , take_from = None , standard_warn=True , stacklevel=2 ):
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
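# Illustrative usage (comment form, since the relative `__version__` import above only
# resolves inside the diffusers package). Argument names below are hypothetical examples:
#
#     deprecate("my_old_argument", "999.0.0", "Use `my_new_argument` instead.")
#     kwargs = {"legacy_flag": True}
#     legacy = deprecate("legacy_flag", "999.0.0", "`legacy_flag` is ignored.", take_from=kwargs)
#     # -> emits a FutureWarning, pops "legacy_flag" out of kwargs, and returns True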
| 186 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors ( n ) -> int:
    """simple docstring"""
    total = 0
    for i in range(1 ,int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution ( limit = 10_000 ) -> int:
    """simple docstring"""
    total = sum(
        i
        for i in range(1 ,limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
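    # Worked example: 220 and 284 form the classic amicable pair, so each contributes
    # to the total returned by solution() above.
    assert sum_of_divisors(220 ) == 284
    assert sum_of_divisors(284 ) == 220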
| 310 |
"""simple docstring"""
from typing import Any
def mode ( input_list ) -> list[Any]:
    """simple docstring"""
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result ) # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
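    # Quick usage example: 2 and 3 both appear twice, so both are modes (returned sorted).
    print(mode([2, 3, 4, 2, 3] ) )  # [2, 3]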
| 310 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , "sklearn" )
    return (preds == labels).mean()
def acc_and_fa ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_fa , "sklearn" )
    acc = simple_accuracy(preds , labels )
    fa = fa_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics ( task_name , preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_fa(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_fa(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics ( task_name , preds , labels ):
    '''simple docstring'''
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
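# Illustrative usage sketch (requires scikit-learn): score hypothetical predictions
# against labels for two GLUE tasks.
if __name__ == "__main__":
    import numpy as np
    _preds = np.array([1, 0, 1, 1] )
    _labels = np.array([1, 0, 0, 1] )
    print(glue_compute_metrics("sst-2" , _preds , _labels ) )  # {'acc': 0.75}
    print(glue_compute_metrics("mrpc" , _preds , _labels ) )  # accuracy, F1 and their mean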
| 80 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number ( number ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def chain ( number ) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution ( number = 10000000 ) -> int:
    '''simple docstring'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
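    # Worked example: 44 -> 32 -> 13 -> 10 -> 1 arrives at 1, while
    # 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 arrives at 89.
    assert chain(44 ) is True and chain(85 ) is False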
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome (n ) -> bool:
    num = str(n )
    return num == num[::-1]
def solution (limit = 1000000 ) -> int:
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
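    # Worked example: 585 is a palindrome in base 10 and in base 2 (1001001001),
    # so it is one of the numbers summed by solution().
    assert is_palindrome(585 ) and is_palindrome(bin(585 ).split("b" )[1] )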
| 129 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError ( Exception ):
    pass
class Node :
    def __init__( self , data: Any ):
        self.data = data
        self.next_node: Node | None = None
    def __iter__( self ):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node )
            yield node.data
            node = node.next_node
    @property
    def has_loop( self ):
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True
    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False
    root_node = Node(1)
    print(root_node.has_loop) # False
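# Alternative sketch (not in the original): Floyd's tortoise-and-hare detects a loop
# in O(1) extra space, versus the O(n) visited list used by `has_loop` above.
def _has_loop_floyd(head: Node | None ) -> bool:
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False
if __name__ == "__main__":
    _head = Node(1 )
    _head.next_node = Node(2 )
    _head.next_node.next_node = _head  # create a loop
    print(_has_loop_floyd(_head ) )  # True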
| 129 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
A_ :Dict = True
except ImportError:
A_ :Union[str, Any] = False
A_ :str = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory ( args ):
    return AddNewModelCommand(args.testing ,args.testing_file ,path=args.path )
class __A ( a ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        add_new_model_parser = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file' , type=str , help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path' , type=str , help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory )
    def __init__( self , testing , testing_file , path=None , *args ):
        """simple docstring"""
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run( self ):
        """simple docstring"""
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter ) )
        else:
            with open(self._testing_file , 'r' ) as configuration_file:
                testing_configuration = json.load(configuration_file )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ) , no_input=True , extra_context=testing_configuration , )
        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json' , 'r' ) as configuration_file:
            configuration = json.load(configuration_file )
        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'{directory}/configuration.json' )
        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax
        model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir , exist_ok=True )
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=True )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , 'w' ):
pass
shutil.move(
f'{directory}/__init__.py' , f'{model_dir}/__init__.py' , )
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' , f'{model_dir}/configuration_{lowercase_model_name}.py' , )
        def remove_copy_lines(path ):
            with open(path , 'r' ) as f:
                lines = f.readlines()
            with open(path , 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' , f'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' , f'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' , f'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' , f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' , f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' , f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file , line_to_copy_below , lines_to_copy ):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh , 'w' ) as new_file:
                with open(original_file ) as old_file:
                    for line in old_file:
                        new_file.write(line )
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy )
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
            # Copy the file permissions from the old file to the new file
            copymode(original_file , abs_path )
            # Remove original file
            remove(original_file )
            # Move new file
            move(abs_path , original_file )
        def skip_units(line ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile ):
            with open(path_to_datafile ) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"' )[1]
                        skip_file = skip_units(line )
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"' )[1]
                        skip_snippet = skip_units(line )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in , line_to_copy_below , lines_to_copy )
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line )
            remove(path_to_datafile )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory )
| 71 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
    def __init__( self , *, # begin keyword-only arguments
    bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        return self.indices == other.indices
    def __getitem__( self , idx ):
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ):
        return len(self.symbols )
    def __contains__( self , sym ):
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        return 0
    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" " , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" " , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT dump (checkpoint.pt, dict.txt, bpecodes) into a HF checkpoint."""
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1_024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path) | 243 | 0 |
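    # Example invocation (hypothetical paths; the dump dir must contain checkpoint.pt,
    # dict.txt and bpecodes as shipped with the official fairseq BioGPT release):
    #
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/biogpt_dump \
    #       --pytorch_dump_folder_path /path/to/hf_output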
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Load weights from a TensorFlow 2.x BERT checkpoint into a PyTorch BertModel."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(names):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Build a BertModel from the config, load the TF 2.x weights, and save a PyTorch state dict."""
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
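    # Example invocation (hypothetical paths), converting a TF 2.x encoder-only
    # checkpoint plus its config into a PyTorch state dict:
    #
    #   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/tf2_ckpt \
    #       --bert_config_file /path/to/bert_config.json \
    #       --pytorch_dump_path /path/to/pytorch_model.bin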
| 357 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds small BlipText configs and inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
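    # To run just this module (assuming a TensorFlow install and the usual
    # transformers test layout):
    #
    #   python -m pytest tests/models/blip/test_modeling_tf_blip_text.py -v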
| 77 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    """Configuration class for Falcon causal-LM checkpoints."""

    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4_544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
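# Usage sketch for the config above; the values are illustrative, not the layout
# of any official checkpoint.
if __name__ == "__main__":
    config = FalconConfig(hidden_size=1_024, num_hidden_layers=8, num_attention_heads=16)
    print(config.head_dim)  # 1024 // 16 == 64
    print(config.rotary)  # True, since alibi defaults to False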
| 35 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {"""vocab_file""": """spiece.model"""}
__UpperCamelCase : Any = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__UpperCamelCase : Tuple = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
__UpperCamelCase : Optional[Any] = """▁"""
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self: Any , UpperCamelCase: List[str] , UpperCamelCase: Union[str, Any]="</s>" , UpperCamelCase: Tuple="<unk>" , UpperCamelCase: Optional[int]="<pad>" , UpperCamelCase: List[str]=1_00 , UpperCamelCase: Dict=None , UpperCamelCase: Optional[Dict[str, Any]] = None , UpperCamelCase: Tuple=True , **UpperCamelCase: Dict , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
snake_case__ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
snake_case__ = len(set(filter(lambda UpperCamelCase : bool('extra_id' in str(UpperCamelCase ) ) , UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
snake_case__ = legacy
snake_case__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=UpperCamelCase , **UpperCamelCase , )
snake_case__ = vocab_file
snake_case__ = extra_ids
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: List[Any] ) -> Any:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
snake_case__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , UpperCamelCase , )
return max_model_length
@property
def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
snake_case__ = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None , UpperCamelCase: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(UpperCamelCase )) + [1]
return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return list(
set(filter(lambda UpperCamelCase : bool(re.search(R'<extra_id_\d+>' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
return [self._convert_token_to_id(UpperCamelCase ) for token in self.get_sentinel_tokens()]
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[int] ) -> List[int]:
if len(UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
snake_case__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
snake_case__ = self._add_eos_if_not_present(UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
snake_case__ = self._add_eos_if_not_present(UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self: Union[str, Any] ) -> List[str]:
snake_case__ = self.__dict__.copy()
snake_case__ = None
return state
def __setstate__( self: Optional[int] , UpperCamelCase: int ) -> List[str]:
snake_case__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ = {}
snake_case__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self: str , UpperCamelCase: "TextInput" , **UpperCamelCase: Dict ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
snake_case__ = SPIECE_UNDERLINE + text.replace(UpperCamelCase , ' ' )
return super().tokenize(UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Any , **UpperCamelCase: str ) -> str:
if not self.legacy:
snake_case__ = text.startswith(UpperCamelCase )
if is_first:
snake_case__ = text[1:]
snake_case__ = self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(UpperCamelCase ):
snake_case__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Optional[int] ) -> Dict:
if token.startswith('<extra_id_' ):
snake_case__ = re.match(R'<extra_id_(\d+)>' , UpperCamelCase )
snake_case__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(UpperCamelCase )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> Tuple:
if index < self.sp_model.get_piece_size():
snake_case__ = self.sp_model.IdToPiece(UpperCamelCase )
else:
snake_case__ = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: Any ) -> Dict:
snake_case__ = []
snake_case__ = ''
snake_case__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase ) + token
snake_case__ = True
snake_case__ = []
else:
current_sub_tokens.append(UpperCamelCase )
snake_case__ = False
out_string += self.sp_model.decode(UpperCamelCase )
return out_string.strip()
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase , 'wb' ) as fi:
snake_case__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
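# Usage sketch with the upstream class this file mirrors (transformers' T5Tokenizer):
# the <extra_id_N> sentinel tokens mark spans for the span-corruption objective, and
# per _convert_token_to_id above, <extra_id_0> maps to vocab_size - 1, <extra_id_1>
# to vocab_size - 2, and so on.
#
#   tokenizer = T5Tokenizer.from_pretrained("t5-small")
#   ids = tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids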
| 307 | 0 |
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
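# Usage sketch: these schedulers are interchangeable at pipeline level. The checkpoint
# name below is illustrative; `from_config` reuses the current scheduler's configuration.
#
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)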
| 83 |
'''simple docstring'''
import requests
lowercase__ = "" # <-- Put your OpenWeatherMap appid here!
lowercase__ = "https://api.openweathermap.org/data/2.5/"
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Chicago" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = "Kolkata, India" , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ = 55.68 , SCREAMING_SNAKE_CASE__ = 12.57 , SCREAMING_SNAKE_CASE__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
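    # Example session (requires a valid OpenWeatherMap appid in APPID above):
    #   Enter a location:Chicago
    #   {'coord': {...}, 'main': {'temp': ...}, ...}  # pretty-printed JSON response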
| 83 | 1 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance between two points using Lambert's ellipsoidal formula."""
    # Equation parameter (flattening of the WGS-84 ellipsoid)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
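    # Quick sanity check (coordinates are illustrative): San Francisco to Yosemite
    # should land near the haversine estimate of roughly 254 km, with a small
    # ellipsoidal correction on top.
    #
    #   lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)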
| 166 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def _A ( ):
"""simple docstring"""
__lowercase =_ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
__lowercase =get_sagemaker_input()
else:
__lowercase =get_cluster_input()
return config
def _A ( _lowerCAmelCase=None ):
"""simple docstring"""
if subparsers is not None:
__lowercase =subparsers.add_parser('config' , description=_lowerCAmelCase )
else:
__lowercase =argparse.ArgumentParser('Accelerate config command' , description=_lowerCAmelCase )
parser.add_argument(
'--config_file' , default=_lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCAmelCase )
return parser
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =get_user_input()
if args.config_file is not None:
__lowercase =args.config_file
else:
if not os.path.isdir(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
__lowercase =default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(_lowerCAmelCase )
else:
config.to_yaml_file(_lowerCAmelCase )
print(f"""accelerate configuration saved at {config_file}""" )
def _A ( ):
"""simple docstring"""
__lowercase =config_command_parser()
__lowercase =parser.parse_args()
config_command(_lowerCAmelCase )
if __name__ == "__main__":
main()
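    # Typical usage (the answers drive which backend-specific questionnaire runs):
    #
    #   accelerate config                        # interactive prompts, saves default_config.yaml
    #   accelerate config --config_file my.yaml  # save to an explicit path instead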
| 166 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 ) -> Optional[int]:
'''simple docstring'''
_snake_case = AutoTokenizer.from_pretrained('bert-base-cased' )
_snake_case = load_dataset('glue' , 'mrpc' )
def tokenize_function(UpperCamelCase__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case = datasets.map(
UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(UpperCamelCase__ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case = 16
elif accelerator.mixed_precision != "no":
_snake_case = 8
else:
_snake_case = None
return tokenizer.pad(
UpperCamelCase__ , padding='longest' , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors='pt' , )
# Instantiate dataloaders.
_snake_case = DataLoader(
tokenized_datasets['train'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
_snake_case = DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase_ = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCamelCase__ ) == "1":
_snake_case = 2
# New Code #
_snake_case = int(args.gradient_accumulation_steps )
# Initialize accelerator
_snake_case = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCamelCase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case = config['lr']
_snake_case = int(config['num_epochs'] )
_snake_case = int(config['seed'] )
_snake_case = int(config['batch_size'] )
_snake_case = evaluate.load('glue' , 'mrpc' )
set_seed(UpperCamelCase__ )
_snake_case , _snake_case = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_snake_case = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCamelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case = model.to(accelerator.device )
# Instantiate optimizer
_snake_case = AdamW(params=model.parameters() , lr=UpperCamelCase__ )
# Instantiate scheduler
_snake_case = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Now we train the model
for epoch in range(UpperCamelCase__ ):
model.train()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(UpperCamelCase__ ):
_snake_case = model(**UpperCamelCase__ )
_snake_case = output.loss
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case = model(**UpperCamelCase__ )
_snake_case = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=UpperCamelCase__ , references=UpperCamelCase__ , )
_snake_case = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , UpperCamelCase__ )
def lowerCamelCase__ ( ) -> Dict:
'''simple docstring'''
_snake_case = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=UpperCamelCase__ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
_snake_case = parser.parse_args()
_snake_case = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
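    # Typical launch (config created beforehand with `accelerate config`; file name
    # illustrative). The number of minibatches per optimizer update is controlled by
    # the new --gradient_accumulation_steps flag added above:
    #
    #   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4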
| 295 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with a single rolling row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
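# The rolling-row update above is Pascal's identity C(i, j) = C(i-1, j) + C(i-1, j-1),
# applied right-to-left so each c[j] still holds the previous row's value when read.
# Sanity check; symmetry also gives C(n, r) == C(n, n - r):
assert binomial_coefficient(n=10, r=5) == 252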
| 295 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum all amicable numbers below `limit` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
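# Worked example: the classic amicable pair is (220, 284), since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220; the extra
# sum_of_divisors(i) != i test excludes perfect numbers such as 6 and 28.
#
#   assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220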
| 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 308 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """Configuration class for Speech2Text encoder-decoder models."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        ) | 210 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that fires accelerate's pre-forward hook (when present) before `method` runs."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse('0.17.0'):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, '_hf_hook') and hasattr(self._hf_hook, 'pre_forward'):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper | 210 | 1 |
import copy
import re
class A__ :
_UpperCAmelCase :Union[str, Any] = '''hp'''
_UpperCAmelCase :str = {}
_UpperCAmelCase :List[Any] = None
@classmethod
def __UpperCamelCase( cls , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : str = prefix
UpperCamelCase : List[str] = defaults
cls.build_naming_info()
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return ""
UpperCamelCase : List[str] = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ):
UpperCamelCase : Dict = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCamelCase : List[Any] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ ):
UpperCamelCase : int = ""
while integer != 0:
UpperCamelCase : Union[str, Any] = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCamelCase : List[Any] = 0
while True:
UpperCamelCase : int = word + "#" + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
UpperCamelCase : List[Any] = sword
break
UpperCamelCase : Dict = short_word
UpperCamelCase : Optional[int] = word
return short_word
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Dict = param_name.split("_" )
UpperCamelCase : int = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCamelCase : int = ["", "_"]
for separator in separators:
UpperCamelCase : str = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
UpperCamelCase : List[Any] = shortname
UpperCamelCase : str = param_name
return shortname
return param_name
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase : str = short_name
UpperCamelCase : int = param_name
@classmethod
def __UpperCamelCase( cls ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
UpperCamelCase : Dict = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCamelCase : Tuple = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase )
UpperCamelCase : int = info
@classmethod
def __UpperCamelCase( cls , A_ ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCamelCase : Optional[int] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCamelCase : Any = cls.NAMING_INFO["short_param"][k]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase : str = 1 if v else 0
UpperCamelCase : str = "" if isinstance(_UpperCAmelCase , (int, float) ) else "-"
UpperCamelCase : int = F"""{key}{sep}{v}"""
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def __UpperCamelCase( cls , A_ ):
'''simple docstring'''
UpperCamelCase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCamelCase : List[Any] = []
else:
UpperCamelCase : Dict = repr.split("_" )
UpperCamelCase : List[str] = {}
for value in values:
if "-" in value:
UpperCamelCase , UpperCamelCase : Union[str, Any] = value.split("-" )
else:
UpperCamelCase : Optional[int] = re.sub("[0-9.]" , "" , _UpperCAmelCase )
UpperCamelCase : Union[str, Any] = float(re.sub("[^0-9.]" , "" , _UpperCAmelCase ) )
UpperCamelCase : List[Any] = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCamelCase : Any = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCamelCase : Any = cls.DEFAULTS[k]
return parameters
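    # Usage sketch (hedged): assuming the obfuscated methods above keep the names of
    # the upstream class this snippet mirrors (transformers' TrialShortNamer:
    # `set_defaults`, `shortname`, `parse_repr`), a subclass round-trips
    # hyperparameter dicts to compact run names:
    #
    #   class RunNamer(TrialShortNamer):
    #       PREFIX = "run"
    #       DEFAULTS = {"lr": 3e-4, "batch_size": 8}
    #
    #   name = RunNamer.shortname({"lr": 1e-4, "batch_size": 8})  # e.g. "run_lr-0.0001"
    #   params = RunNamer.parse_repr(name)  # back to {"lr": 0.0001, "batch_size": 8}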
| 52 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
UpperCAmelCase : Optional[str] = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} )
UpperCAmelCase : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase_ ( self : Dict ):
_A = {}
if self.train_dir is not None:
_A = self.train_dir
if self.validation_dir is not None:
_A = self.validation_dir
_A = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 315 | 0 |
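The absolute learning-rate computation in the script above follows the MAE recipe of scaling linearly with the effective batch size; a small self-contained sketch of the rule (the function name and sample numbers are illustrative, not from the script):

def absolute_lr(base_lr: float, per_device_batch_size: int, grad_accum: int, world_size: int) -> float:
    """absolute_lr = base_lr * total_batch_size / 256, as in the MAE recipe."""
    total_batch_size = per_device_batch_size * grad_accum * world_size
    return base_lr * total_batch_size / 256

assert absolute_lr(1e-3, 64, 1, 4) == 1e-3  # 256 images per step keeps the base rate unchanged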
"""simple docstring"""
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 369 |
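The one-liner above is a quine because %r substitutes the repr of the string (quotes included) while %% collapses to a literal %; a quick way to convince yourself, using the same string:

s = 'print((lambda quine: quine %% quine)(%r))'
assert (s % s) == "print((lambda quine: quine % quine)(" + repr(s) + "))"
print(s % s)  # prints the original program's source text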
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input_shape is only needed on the first layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 86 | 0 |
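The sliding-window construction used above is worth isolating; a minimal sketch with made-up data (the function name and shapes are illustrative, not from the original script):

import numpy as np

def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    """Slice a 1-D series into (inputs, targets) pairs for sequence forecasting."""
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)

x, y = make_windows(np.arange(20, dtype=float), look_back=10, forward_days=5)
assert x.shape == (6, 10) and y.shape == (6, 5)  # 20 - 10 - 5 + 1 = 6 windows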
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Return the indentation prefix of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at `indent_level`, optionally bounded by `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wrap a key function so comparisons are lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` isort-style: constants first, classes second, functions last."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the objects inside it properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init `file`; overwrite it if `check_only=False`."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS and collect failures."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))

    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 87 |
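The three-bucket ordering rule implemented by sort_objects above is easiest to see on a toy list (the names below are invented for the demonstration, and the functions above are assumed to be in scope):

names = ["load_model", "MAX_SIZE", "Trainer", "DEFAULT_SEED", "AutoConfig", "run"]
# Constants (all caps) first, classes (Capitalized) second, functions (lowercase) last,
# each group sorted case-insensitively with underscores ignored.
assert sort_objects(names) == ["DEFAULT_SEED", "MAX_SIZE", "AutoConfig", "Trainer", "load_model", "run"]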
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
_UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 324 | 0 |
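A short usage sketch of the translation API these tests exercise (this downloads the checkpoint; the example sentence is made up, and the generation call is a plausible pattern rather than part of the test file):

from transformers import MBartForConditionalGeneration, MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Force the first generated token to be the target-language code.
generated = model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))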
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 369 |
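The host/device buffer dance in the script above follows a standard pycuda pattern; a minimal round-trip sketch (no TensorRT involved, just the async copy plus stream-synchronize idiom, assuming a CUDA-capable machine with pycuda installed):

import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context on import)
import pycuda.driver as cuda

h_input = cuda.pagelocked_empty(4, dtype=np.float32)  # page-locked host memory enables async copies
h_input[:] = [1.0, 2.0, 3.0, 4.0]
d_buf = cuda.mem_alloc(h_input.nbytes)                # raw device allocation
h_output = cuda.pagelocked_empty_like(h_input)

stream = cuda.Stream()
cuda.memcpy_htod_async(d_buf, h_input, stream)        # host -> device
cuda.memcpy_dtoh_async(h_output, d_buf, stream)       # device -> host
stream.synchronize()                                  # both copies are complete after this point
assert (h_output == h_input).all()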
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 241 | 0 |
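A quick usage sketch of the model these tests exercise, using a tokenizer instead of hand-built token IDs (downloads the checkpoint on first run):

from transformers import AutoTokenizer, XLMRobertaModel
import torch

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state
print(hidden.shape)  # (1, sequence_length, 768)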