| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 to 1) |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| code_codestyle: 237 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| style_context_codestyle: 202 | label: 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| code_codestyle: 171 |
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(F"""{solution() = }""")
| style_context_codestyle: 171 | label: 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}


class Data2VecTextConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| code_codestyle: 11 |
def method_2(boundary: list, steps: int) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
| style_context_codestyle: 11 | label: 1 |
def perfect_cube(n: int) -> bool:
    """simple docstring"""
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| code_codestyle: 350 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| style_context_codestyle: 161 | label: 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        '''simple docstring'''
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)
        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }
        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None
            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)
            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")
            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}
        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| code_codestyle: 311 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    '''simple docstring'''
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 311 | label: 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
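    # X_L = 2 * pi * f * L relates the three quantities, so whichever argument
    # is passed as 0 is treated as the unknown and solved for below.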
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 306 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    '''simple docstring'''

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str):
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| style_context_codestyle: 306 | label: 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''simple docstring'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    '''simple docstring'''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| code_codestyle: 117 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
        if distance[u] != float('inf') and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float('inf')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['src', 'dst', 'weight'])
            if distance[u] != float('inf') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('Negative cycle found')
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('Enter number of vertices: ').strip())
    E = int(input('Enter number of edges: ').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('Edge ', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('Enter source, destination, weight: ').strip().split(' ')
        )
        graph[i] = {'src': src, 'dst': dest, 'weight': weight}
    source = int(input('\nEnter shortest path source:').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| style_context_codestyle: 117 | label: 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    """simple docstring"""

    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase):
    """simple docstring"""

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """simple docstring"""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """simple docstring"""

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| code_codestyle: 15 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """simple docstring"""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """simple docstring"""
    result = base
    for _ in range(1, height):
        result = _modexpt(result, base, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| style_context_codestyle: 15 | label: 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        '''simple docstring'''
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        '''simple docstring'''
        return self.indices == other.indices

    def __getitem__(self, idx):
        '''simple docstring'''
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        '''simple docstring'''
        return len(self.symbols)

    def __contains__(self, sym):
        '''simple docstring'''
        return sym in self.indices

    @classmethod
    def load(cls, f):
        '''simple docstring'''
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        '''simple docstring'''
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        '''simple docstring'''
        return 0
    def add_from_file(self, f):
        '''simple docstring'''
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(' ', 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(' ', 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'')
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'Writing results to {pytorch_dump_folder_path}')

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, 'checkpoint.pt')
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!')
    chkpt = torch.load(checkpoint_file, map_location='cpu')
    args = chkpt['cfg']['model']

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, 'dict.txt')
    if not os.path.isfile(dict_file):
        raise ValueError(f'path to the file {dict_file} does not exist!')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['vocab_file'])
    print(f'Generating {src_vocab_file} of {src_vocab_size} records')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, 'bpecodes')
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
    model_conf = {
        'activation_dropout': args['activation_dropout'],
        'architectures': ['BioGptForCausalLM'],
        'attention_probs_dropout_prob': args['attention_dropout'],
        'bos_token_id': 0,
        'eos_token_id': 2,
        'hidden_act': args['activation_fn'],
        'hidden_dropout_prob': args['dropout'],
        'hidden_size': args['decoder_embed_dim'],
        'initializer_range': 0.02,
        'intermediate_size': args['decoder_ffn_embed_dim'],
        'layer_norm_eps': 1e-12,
        'layerdrop': args['decoder_layerdrop'],
        'max_position_embeddings': args['max_target_positions'],
        'model_type': 'biogpt',
        'num_attention_heads': args['decoder_attention_heads'],
        'num_hidden_layers': args['decoder_layers'],
        'pad_token_id': 1,
        'scale_embedding': not args['no_scale_embedding'],
        'tie_word_embeddings': args['share_decoder_input_output_embed'],
        'vocab_size': src_vocab_size,
    }

    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}')
    with open(biogpt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'bos_token': '<s>',
        'eos_token': '</s>',
        'model_max_length': 1024,
        'pad_token': '<pad>',
        'special_tokens_map_file': None,
        'tokenizer_class': 'BioGptTokenizer',
        'unk_token': '<unk>',
    }
    print(f'Generating {biogpt_tokenizer_config_file}')
    with open(biogpt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt['model']

    # remove unneeded keys
    ignore_keys = [
        'decoder.version',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith('output_projection.weight'):
            model_state_dict['output_projection.weight'] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace('decoder', 'biogpt')] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'Generating {pytorch_weights_dump_path}')
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print('Conversion is done!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| code_codestyle: 190 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table):
        '''simple docstring'''
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        '''simple docstring'''
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| style_context_codestyle: 190 | label: 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer, backed by HuggingFace's `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
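# Minimal usage sketch (downloads the hosted files mapped above on first use;
# the expected ids below are for the bert-base-uncased vocabulary):
#
#   tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   tok("Hello world!")["input_ids"]  # [101, 7592, 2088, 999, 102]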
| 193
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: the best total value achievable using items
    from `index` onward without exceeding `max_weight`."""
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
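# Worked example for the recursion above (item data is illustrative):
# items (weight, value) = (1, 5), (2, 4), (4, 8), (5, 6) with capacity 5;
# the optimum takes items 0 and 2 for a total value of 5 + 8 = 13.
#
#   >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#   13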
| 193
| 1
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
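# A quick CPU-only smoke test of the regression helpers above, guarded so it
# only runs when the module is executed directly (all constants are arbitrary):
if __name__ == "__main__":
    dataset = RegressionDataset(a=2, b=3, length=8, seed=42)
    model = RegressionModel(a=1.0, b=0.0)
    prediction = model(torch.tensor(dataset[0]["x"]))  # prints the dtype banner once
    print(prediction)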
| 216
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
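# Sketch of a concrete test built on the mixin above (the tool name is
# illustrative and assumes the `load_tool` helper exported by transformers):
#
#   from transformers import load_tool
#
#   class TextClassificationToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("text-classification")
#           self.tool.setup()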
| 216
| 1
|
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
print(F"""{solution() = }""")
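# Sanity check: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
#
#   >>> solution(13195)
#   29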
| 335
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335
| 1
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Determine whether a polygon with the given side lengths can exist:
    the longest side must be strictly shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
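# Quick checks of the generalized polygon inequality above:
#
#   >>> check_polygon([6, 10, 5])
#   True        # 10 < 6 + 5
#   >>> check_polygon([3, 7, 13, 2])
#   False       # 13 >= 3 + 7 + 2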
| 53
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
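# Composing criteria (all numbers are arbitrary): generation stops as soon as
# any criterion in the list fires.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#   )
#   # inside a decoding loop:
#   #   if criteria(input_ids, scores):
#   #       break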
| 138
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
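# Spot checks for the QA metrics above (normalization lowercases and strips
# punctuation and articles, so surface differences are forgiven):
#
#   exact_match_score("The Eiffel Tower!", "eiffel tower")  # True
#   f1_score("the cat sat", "cat sat down")                 # 0.8 after normalization
#   calculate_exact_match(["Paris"], ["paris."])            # {"em": 1.0}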
| 367
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 143
| 0
|
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Probability density of the normal distribution N(mu, sigma**2) at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
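# Spot values for the density above (standard normal peaks at 1/sqrt(2*pi)):
#
#   >>> round(gaussian(0), 4)
#   0.3989
#   >>> gaussian(2, mu=2) == gaussian(0)   # same peak value, shifted mean
#   True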
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 45
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 114
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
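# Quick check of the defaults (a sketch; the attribute names mirror the
# assignments above):
#
#   config = Data2VecVisionConfig()
#   (config.hidden_size, config.num_hidden_layers, config.patch_size)  # (768, 12, 16)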
| 114
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 246
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Returns the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominator <= limit
    (Project Euler problem 71)."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
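# Small-limit check: among fractions n/d < 3/7 with d <= 8, the closest from
# below is 2/5, so the function returns its numerator:
#
#   >>> solution(numerator=3, denominator=7, limit=8)
#   2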
| 246
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
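# Usage sketch (assumes this generated module is importable as `sentencepiece_model_pb2`):
# proto = sentencepiece_model_pb2.ModelProto()
# proto.ParseFromString(open("spiece.model", "rb").read())
# print(proto.trainer_spec.vocab_size, len(proto.pieces))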
| 365
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__A : Optional[int] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
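# Usage note: instantiating the deprecated class still works; it simply emits a
# FutureWarning and then behaves exactly like MobileViTImageProcessor.
# feature_extractor = MobileViTFeatureExtractor()  # warns, then works as before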
| 8
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
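# Running these tests (sketch; the path assumes the standard transformers repo layout):
#   RUN_SLOW=1 pytest tests/models/albert/test_modeling_flax_albert.py
# The @slow-marked tests are skipped unless RUN_SLOW=1 is set in the environment.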
| 92
|
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place by sweeping alternately backwards and forwards.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""")
| 204
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
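# Usage sketch (any encoder/decoder config pair works; BertConfig is just an example):
# from transformers import BertConfig
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention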
| 350
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
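# Background note: PNDM warms up with pseudo Runge-Kutta (PRK) steps and then switches to
# pseudo linear multistep (PLMS) steps; `step_plms` needs a history of past noise residuals
# (`scheduler.ets`), which is why the tests seed it manually and expect a ValueError when
# it is called without that warm-up.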
| 189
| 0
|
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: Any | None = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)
    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False
    def empty(self) -> bool:
        return self.root is None
    def __insert(self, value) -> None:
        """
        Insert a new node in the Binary Search Tree with value label.
        """
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError('Warning: Tree is empty! please use another.')
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node
    def get_max(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the right branch.
        """
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        """
        We go deep on the left branch.
        """
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node
    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure
    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        This function traverses the tree.
        You can pass a function to traverse the tree as needed by client code.
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)
    def inorder(self, arr: list, node: Node | None) -> None:
        """Perform an inorder traversal and append values of the nodes to the list arr."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        """Return the kth smallest element in a binary search tree."""
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
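# Usage sketch: find_kth_smallest works because the in-order traversal of a BST is sorted.
# t = BinarySearchTree(); t.insert(8, 3, 6, 1); t.find_kth_smallest(2, t.root)  # -> 3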
def postorder(curr_node: Node | None) -> list[Node]:
    """
    postOrder (left, right, self)
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print('The value 6 exists')
    else:
        print('The value 6 doesn\'t exist')

    if t.search(-1) is not None:
        print('The value -1 exists')
    else:
        print('The value -1 doesn\'t exist')

    if not t.empty():
        print('Max Value: ', t.get_max().value)  # type: ignore
        print('Min Value: ', t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 286
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(global_padding='same', layer_type='bottleneck', depths=(3, 4, 9), out_features=['stage3'], embedding_dynamic_padding=True)
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
# create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
# verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
# verify logits
with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
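# Example invocation (sketch; the script filename is an assumption):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base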
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
lowerCamelCase_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 286
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True)
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)
# create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
# verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
# verify logits
with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(F"""ybelkada/{vit_name}""" )
processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
A__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 209
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
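# Note: slerp falls back to plain linear interpolation when |dot| > DOT_THRESHOLD,
# since arccos is numerically unstable for nearly parallel vectors.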
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
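# Geometry note: for unit vectors, ||x - y|| = 2 sin(theta / 2), so arcsin(||x - y|| / 2)
# recovers theta / 2; squaring and doubling yields the squared spherical distance (up to a
# constant factor) commonly used as the CLIP-guidance loss.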
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform)
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
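# A minimal usage sketch for the style-mixing pipeline above. This is not part of the
# original file: the pipeline class/checkpoint names below are assumptions for
# illustration, and the keyword names mirror the reconstructed `__call__` signature.
#
#   import PIL.Image
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "some/stable-diffusion-checkpoint",          # hypothetical model id
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # hypothetical name
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   out = pipe(
#       style_image=PIL.Image.open("style.png"),
#       content_image=PIL.Image.open("content.png"),
#       num_inference_steps=50,
#       clip_guidance_scale=100,
#       slerp_latent_style_strength=0.8,
#   )
#   out.images[0].save("mixed.png")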
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    '''simple docstring'''

    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    @require_elasticsearch
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    '''simple docstring'''

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """simple docstring"""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    '''simple docstring'''

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
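# A minimal, self-contained sketch of the FaissIndex workflow exercised by the tests
# above (assuming `faiss` is installed; shapes and values mirror the unit tests):
if __name__ == "__main__":
    import faiss

    demo_index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    demo_index.add_vectors(np.eye(5, dtype=np.float32))  # five orthonormal vectors
    demo_scores, demo_indices = demo_index.search(np.ones(5, dtype=np.float32))
    # Under inner product, every basis vector scores 1.0 against the all-ones query.
    print(demo_scores[0], demo_indices[0])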
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """simple docstring"""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """simple docstring"""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
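# Worked example (a sketch, not part of the original module): for the 2x2 matrix
# [[2, 1], [1, 2]] the eigenvalues are 3 and 1, so power iteration started from
# [1, 0] converges to lambda ~ 3 with eigenvector ~ [1, 1] / sqrt(2):
#
#   lam, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   assert abs(lam - 3.0) <= 1e-6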
'''simple docstring'''
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two pairs of numbers from input and typecast them to int using map.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
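# Worked example (sketch): comparing res() values is equivalent to comparing the
# original powers, because log10 is monotonically increasing:
#
#   >>> round(res(2, 3), 3)   # stands in for 2**3 = 8
#   0.903
#   >>> round(res(3, 2), 3)   # stands in for 3**2 = 9
#   0.954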
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )


if __name__ == "__main__":
    unittest.main()
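# Worked example (a sketch, using only values from the tests above): the determinant
# asserted in test_determinant follows from cofactor expansion along the first row of
# [[1, 2, 3], [2, 4, 5], [6, 7, 8]]:
#
#   det = 1*(4*8 - 5*7) - 2*(2*8 - 5*6) + 3*(2*7 - 4*6)
#       = -3 + 28 - 30
#       = -5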
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """simple docstring"""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        """simple docstring"""
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        """simple docstring"""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the candidate substring from the right
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        """simple docstring"""
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        """simple docstring"""
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        """simple docstring"""
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        """simple docstring"""
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """simple docstring"""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """simple docstring"""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        """simple docstring"""
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
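# A short trace of the greedy longest-match-first loop in WordpieceTokenizer above
# (a sketch with a hypothetical vocabulary, not the shipped vocab file): with
# vocab = {"foo", "foob", "ar"} and token "foobar", the inner loop first matches the
# longest prefix "foob", advances `start` past it, then matches "ar", yielding
# ["foob", "ar"]; a character with no match at all would emit the unk token and
# advance by a single character instead.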
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
) -> None:
    """simple docstring"""
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ = xmod.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCamelCase )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(__UpperCamelCase ) )
else:
SCREAMING_SNAKE_CASE__ = xmod.model(__UpperCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ = torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
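# Example invocation (a sketch: the script filename and paths are placeholders,
# not taken from the original file):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --classification_head   # only when converting a model with an MNLI head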
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__a = {0: '''batch'''}
__a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__a = {0: '''batch''', 1: '''decoder_sequence'''}
__a = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_a , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(_a ):
__a = {0: '''batch''', 2: '''past_sequence + sequence'''}
__a = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__a = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(_a , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(_a ):
__a = {0: '''batch''', 2: '''past_sequence + sequence'''}
__a = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
__a = self._generate_dummy_inputs_for_encoder_and_decoder(
_a , _a , _a , _a , _a )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_encoder_and_decoder(
_a , _a , _a , _a , _a )
__a = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__a = dict(**_a , **_a )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a , __a = common_inputs['''input_ids'''].shape
__a = common_inputs['''decoder_input_ids'''].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_a , _a )] , dim=1 )
__a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(_a , _a )
__a = max(_a , _a ) - min_num_layers
__a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_a ):
common_inputs["past_key_values"].append(
(
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
torch.zeros(_a ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_a , _a ):
common_inputs["past_key_values"].append((torch.zeros(_a ), torch.zeros(_a )) )
return common_inputs
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
__a = self._generate_dummy_inputs_for_encoder_and_decoder(
_a , _a , _a , _a , _a )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a , __a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs['''attention_mask'''].dtype
__a = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_a , _a , dtype=_a )] , dim=1 )
__a = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(_a )
]
return common_inputs
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(_a )
__a = compute_effective_axis_dimension(
_a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_a )
# Generate dummy inputs according to compute batch and sequence
__a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(_a , return_tensors=_a ) )
return common_inputs
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
else:
__a = self._generate_dummy_inputs_for_causal_lm(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
return common_inputs
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(_a , _a , _a , _a )
else:
__a = super(_a , self )._flatten_past_key_values_(
_a , _a , _a , _a )
@property
def __UpperCAmelCase ( self ):
return 1E-4
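# A minimal sketch of how the ONNX config above is typically consumed (a sketch, not
# from the original file; the tokenizer checkpoint is an assumption for illustration):
#
#   from transformers import AutoTokenizer
#
#   config = MarianConfig()  # defaults declared above
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   print(onnx_config.inputs)  # dynamic-axes spec, e.g. {"input_ids": {0: "batch", 1: "encoder_sequence"}, ...}
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)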
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the same column value
        # is not already in the current board (possible_board), because if it is, there
        # is a vertical collision. Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
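# For n = 4 the search above finds exactly two solutions, corresponding to the column
# lists [1, 3, 0, 2] and [2, 0, 3, 1]; the printed boards look like:
#
#   . Q . .        . . Q .
#   . . . Q        Q . . .
#   Q . . .        . . . Q
#   . . Q .        . Q . .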
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None
@dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __a ( self ):
@dataclass
class lowercase_ :
__UpperCAmelCase = "toto"
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=a )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=a )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=a )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(
a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __a ( self ):
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=a , type=a )
expected.add_argument("--bar" , default=a , type=a , help="help message" )
expected.add_argument("--baz" , default=a , type=a )
expected.add_argument("--ces" , nargs="+" , default=[] , type=a )
expected.add_argument("--des" , nargs="+" , default=[] , type=a )
UpperCamelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a )
for dataclass_type in dataclass_types:
UpperCamelCase__ = HfArgumentParser(a )
self.argparsersEqual(a , a )
UpperCamelCase__ = parser.parse_args([] )
self.assertEqual(a , Namespace(foo=a , bar=a , baz=a , ces=[] , des=[] ) )
UpperCamelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(a , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=a , required=a )
expected.add_argument("--required_str" , type=a , required=a )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
self.argparsersEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a , required=a )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
expected.add_argument("--opt" , type=a , default=a )
expected.add_argument("--baz" , default="toto" , type=a , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
self.argparsersEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCamelCase__ = parser.parse_dict(a )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(a , parser.parse_dict , a , allow_extra_keys=a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a , "temp_json" )
os.mkdir(a )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(a , a )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
UpperCamelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(a , "temp_yaml" )
os.mkdir(a )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(a , a )
UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCamelCase__ = BasicExample(**a )
self.assertEqual(a , a )
def __a ( self ):
UpperCamelCase__ = HfArgumentParser(a )
self.assertIsNotNone(a )
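# --- Illustrative sketch (added for clarity, not part of the test suite): typical
# HfArgumentParser usage with a hypothetical dataclass `MyArgs`.
#
#   @dataclass
#   class MyArgs:
#       foo: int = 42
#
#   parser = HfArgumentParser(MyArgs)
#   (my_args,) = parser.parse_args_into_dataclasses(["--foo", "7"])
#   assert my_args.foo == 7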
| 80
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """simple docstring"""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
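# With both inputs L2-normalized, the matrix product above yields pairwise cosine
# similarities: for image_embeds of shape (N, D) and text_embeds of shape (M, D),
# the result has shape (N, M) with values in [-1, 1].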
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
    def __init__(self, config: CLIPConfig):
        """simple docstring"""
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
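# Sketch of the rule both methods implement (illustrative; thresholds come from the
# model weights):
#   score = cosine(image_embed, concept_embed) - learned_threshold + adjustment
# An image is flagged when any concept score exceeds 0; a hit on a "special care"
# concept first bumps `adjustment` to 0.01, making the main concept check stricter.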
| 348
| 0
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
_UpperCAmelCase : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: "
_UpperCAmelCase : List[Any] = "=======\n>>>>>>>\n"
_UpperCAmelCase : Optional[int] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
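# Example of how the rules above rewrite a line (patterns are applied in list order):
#   features=tfds.features.FeaturesDict({"text": tfds.features.Text()})
# becomes
#   features=datasets.Features({"text": datasets.Value('string')})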
def convert_command_factory(args: Namespace):
    """simple docstring"""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """simple docstring"""
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.")
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.")
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder.")
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        """simple docstring"""
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        """simple docstring"""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 358
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """simple docstring"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    """simple docstring"""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
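# Minimal usage sketch (assumes an `Accelerator()` or `PartialState()` has been
# created first, as the RuntimeError above enforces):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed by every rank, in order", main_process_only=False, in_order=True)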
| 110
| 0
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others.")
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
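# e.g. two entries {"local": "bert", "title": "BERT"} collapse into one, while the
# same "local" key appearing under two different titles raises the error above.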
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 278
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
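# The linear layer built above shares the embedding tensor, so the LM head is
# weight-tied to the input embeddings: one (vocab_size, d_model) matrix serves
# both token lookup and the output projection, with no bias term.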
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A = parser.parse_args()
_A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 278
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 324
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 324
| 1
|
def remove_digit(num: int) -> int:
    """returns the biggest possible result that can be achieved by removing one digit from the given number"""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(int("".join(list(transposition))) for transposition in num_transpositions)
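# e.g. remove_digit(152) -> 52: of the candidates 52, 12 and 15 obtained by
# deleting one digit, 52 is the largest.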
if __name__ == "__main__":
__import__("doctest").testmod()
| 21
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },)
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},)
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },)
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },)
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },)
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",)
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,)
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file,)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
| 221
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 244
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """simple docstring"""
    return (abs(source - target) / target) < 0.01
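# e.g. is_1percent_close(1002, 1000) is True (0.2% off) while
# is_1percent_close(1020, 1000) is False (2% off).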
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_351_563,
"num_examples": 10_000,
},
{
"name": "validation",
"num_bytes": 238_418,
"num_examples": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 244
| 1
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs):
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs,)
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        """simple docstring"""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]):
        """simple docstring"""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """simple docstring"""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        return ()
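# Byte-level id scheme used above: each UTF-8 byte maps to (byte value + 3), since
# ids 0-2 are reserved for <pad>, </s> and <unk>. e.g. "hi" -> bytes 104, 105 ->
# token ids [107, 108], with the eos id 1 appended when building model inputs.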
| 182
|
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
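# Worked example: comparing 2^10 with 10^3 via logs,
#   res(2, 10) = 10 * log10(2) ≈ 3.0103 and res(10, 3) = 3 * log10(10) = 3.0,
# so 2^10 (= 1024) is reported as the larger power, matching direct computation.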
| 8
| 0
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        """simple docstring"""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine)
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim, in_channels)
def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : str=1 , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Dict = True , ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = hidden_states.shape
UpperCamelCase = batch_frames // num_frames
UpperCamelCase = hidden_states
UpperCamelCase = hidden_states[None, :].reshape(A__ , A__ , A__ , A__ , A__ )
UpperCamelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
UpperCamelCase = self.norm(A__ )
UpperCamelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , A__ , A__ )
UpperCamelCase = self.proj_in(A__ )
# 2. Blocks
for block in self.transformer_blocks:
UpperCamelCase = block(
A__ , encoder_hidden_states=A__ , timestep=A__ , cross_attention_kwargs=A__ , class_labels=A__ , )
# 3. Output
UpperCamelCase = self.proj_out(A__ )
UpperCamelCase = (
hidden_states[None, None, :]
.reshape(A__ , A__ , A__ , A__ , A__ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
UpperCamelCase = hidden_states.reshape(A__ , A__ , A__ , A__ )
UpperCamelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=A__ )
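# Minimal usage sketch for the module above (illustrative, not part of the original
# file; kept as a comment because this module uses relative imports). The temporal
# transformer is shape-preserving over (batch * num_frames, channels, height, width):
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8,
#                                    in_channels=16, norm_num_groups=4)
#   sample = torch.randn(4, 16, 8, 8)   # two 2-frame videos stacked on the batch axis
#   out = model(sample, num_frames=2).sample
#   assert out.shape == sample.shape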
def simplify(current_set: list[list]) -> list[list]:
    """Normalize each row so its leading term is 1, cancel the leading terms,
    and recurse on the remaining columns until the set is fully reduced."""
    # Divide each row by magnitude of first term --> creates 'first term 1'
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n lists of length n + 1
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """Load a raw state dict from a .th checkpoint on the local filesystem."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename keys from the original VisualBERT checkpoint to the transformers layout."""
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original weights into the transformers VisualBERT structure."""
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    sd = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(sd, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
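# Example invocation (illustrative; the script name is assumed, and the checkpoint
# file name must be one of ACCEPTABLE_CHECKPOINTS above):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       vqa_pre_trained.th ./visual_bert_vqa_pretrained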
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaf modules (plus conv/batchnorm layers, which may wrap children)
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we track all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
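# Example invocation (illustrative; the script name is assumed):
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted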
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should see each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
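# Illustrative usage (a sketch, not part of the original file): the derived
# attributes computed in __init__ above, for the default Swin-Tiny layout.
#
#   config = SwinConfig()
#   assert config.hidden_size == 768            # int(96 * 2 ** 3)
#   assert config.stage_names[0] == "stem"      # ["stem", "stage1", ..., "stage4"]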
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess: by default, logs are
    emitted on the main process only.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates the logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
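# Illustrative usage (a sketch, not part of the original file; requires the
# accelerate state to be initialized first, e.g. by constructing an Accelerator):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("visible on the main process only", main_process_only=True)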
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))
        self.add(key)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
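# To render the scene above with manim (illustrative command; the file name is
# assumed):
#
#   manim -pql stage_1.py Stage1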
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Pure Python implementation of a P-Series algorithm: return the P-Series
    1 + 1/2^p + 1/3^p + ... + 1/n^p as a list of strings.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []

    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))

    for stage_idx in range(len(config.backbone_config.depths)):
        src_prefix = f"patch_embed.backbone.stages.{stage_idx}"
        dest_prefix = f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}"
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            for suffix in ("conv1.weight", "norm1.weight", "norm1.bias", "conv2.weight", "norm2.weight", "norm2.bias", "conv3.weight", "norm3.weight", "norm3.bias"):
                rename_keys.append((f"{src_prefix}.blocks.{layer_idx}.{suffix}", f"{dest_prefix}.layers.{layer_idx}.{suffix}"))
        for suffix in ("downsample.conv.weight", "downsample.norm.weight", "downsample.norm.bias"):
            rename_keys.append((f"{src_prefix}.blocks.0.{suffix}", f"{dest_prefix}.layers.0.{suffix}"))

    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the timm weights into the transformers ViT hybrid structure."""
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--vit_name",
        default="vit_base_r50_s16_384",
        type=str,
        help="Name of the hybrid ViT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )

    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
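# Example invocation (illustrative; the script name is assumed):
#
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base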
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        # Greedily apply the highest-priority (lowest-rank) merge until none remain.
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> List[str]:
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text: str, is_split_into_words: bool = False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
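# Illustrative usage (not part of the original module; assumes hub access or local files):
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   tokenizer("Sam is great").input_ids  # ends with the eos token id appended above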
| 240
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    '''Configuration class for Marian encoder-decoder models.'''
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=58101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58100 , scale_embedding=False , pad_token_id=58100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''ONNX export configuration for Marian sequence-to-sequence models.'''
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
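    # Each mapping above declares the dynamic axes for one exported input tensor:
    # axis 0 is always the batch, axis 1 (or 2 for cached keys/values) the sequence length.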
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
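    # Each past_key_values entry holds four zero tensors per layer: decoder self-attention
    # key/value (decoder_shape) followed by cross-attention key/value (encoder_shape).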
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 11
|
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or to PascalCase if use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 11
| 1
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01) -> None:
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
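# Mathematically, basis_function(t) evaluates the Bernstein polynomials, so a point on the
# curve is B(t) = sum_i comb(n, i) * (1 - t)**(n - i) * t**i * P_i, with n = degree and
# P_i the control points.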
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
| 297
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None) -> None:
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, return_dict: bool = True, ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # Linear multistep coefficients: use as many stored evaluations as are available.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets) -> torch.FloatTensor:
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
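# Typical driver loop (sketch): call scheduler.set_timesteps(N) once, then for each t in
# scheduler.timesteps compute model_output and take
#   sample = scheduler.step(model_output, t, sample).prev_sample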
| 297
| 1
|
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
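# For example, binary_or(25, 32) pads 0b11001 and 0b100000 to the same width and
# ORs them digit by digit, giving "0b111001" (i.e. 57).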
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
| 243
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query tokens against support-set entity boundary tokens."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased") -> None:
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
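# forward() returns per-query-token probabilities of aligning with the support set's
# entity start markers (p_starts) and end markers (p_ends), from which downstream code
# can decode candidate entity spans.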
| 184
|
'''simple docstring'''
def combination_sum_iv(n: int, array: list, target: int) -> int:
    """Count ordered combinations of elements of `array` summing to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
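# The naive recursion above re-solves the same sub-targets exponentially many times;
# the two variants below cut this to O(target * n) via memoization and tabulation.
# For instance, combination_sum_iv(3, [1, 2, 5], 5) counts the 9 ordered ways to write 5.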
def combination_sum_iv_dp_array(n: int, array: list, target: int) -> int:
    """Same count, memoized over sub-targets (top-down dynamic programming)."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
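# The bottom-up variant below fills dp_array[0..target] once, where dp_array[i] counts
# the ordered combinations summing to i.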
def combination_sum_iv_bottom_up(n: int, array: list, target: int) -> int:
    """Same count via bottom-up tabulation."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 184
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the BlenderbotSmall BPE tokenizer."""

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
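    # In this toy vocab, "@@" is the subword continuation marker: "apte" tokenizes to
    # "ap@@" + "te" and is rejoined into a single word on decode.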
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self) -> None:
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 324
|
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place by insertion sort and return it."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at index satisfies the max-heap property."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """Heap sort: build a max heap, then repeatedly move the maximum to the end."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three candidate pivot values."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of array[low:high] around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: quicksort with a depth cap, heap sort fallback, and
    insertion sort for small ranges."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
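# Capping recursion depth at 2 * ceil(log2(n)) bounds quicksort's worst case: once the
# cap is hit, heap_sort takes over (guaranteed O(n log n)); ranges of at most 16 elements
# finish with insertion sort, which is fastest on small, nearly-sorted slices.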
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort over array[start:end]."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 324
| 1
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list) -> None:
    """Write the articles, one per line, to `path`."""
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    """Create {train,val,test}.{source,target} files with the toy articles/summaries."""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in ARTICLES )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in SUMMARIES )
lowercase = 4
lowercase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowercase , lowercase = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , )
lowercase = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowercase = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in ARTICLES )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in SUMMARIES )
lowercase = 4
lowercase = LegacySeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=20 , max_target_length=__lowerCAmelCase , )
lowercase = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A__ ( self ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowercase = tmp_dir.joinpath("""train.source""" ).open().readlines()
lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__lowerCAmelCase , __lowerCAmelCase , 128 , __lowerCAmelCase )
lowercase = {x.name for x in tmp_dir.iterdir()}
lowercase = {x.name for x in save_dir.iterdir()}
lowercase = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowerCAmelCase ) < len(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__lowerCAmelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def A__ ( self ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
lowercase , lowercase , lowercase = self._get_dataset(max_len=64 )
lowercase = 64
lowercase = ds.make_dynamic_sampler(__lowerCAmelCase , required_batch_size_multiple=__lowerCAmelCase )
lowercase = [len(__lowerCAmelCase ) for x in batch_sampler]
assert len(set(__lowerCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowerCAmelCase ) == len(__lowerCAmelCase ) # no dropped or added examples
lowercase = DataLoader(__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
lowercase = []
lowercase = []
for batch in data_loader:
lowercase = batch["""input_ids"""].shape
lowercase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowercase = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(__lowerCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowerCAmelCase )
assert num_src_per_batch[0] == max(__lowerCAmelCase )
if failures:
raise AssertionError(f'too many tokens in {len(__lowerCAmelCase )} batches' )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase , lowercase = self._get_dataset(max_len=512 )
lowercase = 2
lowercase = ds.make_sortish_sampler(__lowerCAmelCase , shuffle=__lowerCAmelCase )
lowercase = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
lowercase = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCAmelCase )
lowercase = tokenizer.pad_token_id
def count_pad_tokens(__lowerCAmelCase , __lowerCAmelCase="input_ids" ):
return [batch[k].eq(__lowerCAmelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowerCAmelCase , k="""labels""" ) ) < sum(count_pad_tokens(__lowerCAmelCase , k="""labels""" ) )
assert sum(count_pad_tokens(__lowerCAmelCase ) ) < sum(count_pad_tokens(__lowerCAmelCase ) )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase=1000 , __lowerCAmelCase=128 ):
"""simple docstring"""
if os.getenv("""USE_REAL_DATA""" , __lowerCAmelCase ):
lowercase = """examples/seq2seq/wmt_en_ro"""
lowercase = max_len * 2 * 64
if not Path(__lowerCAmelCase ).joinpath("""train.len""" ).exists():
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
else:
lowercase = """examples/seq2seq/test_data/wmt_en_ro"""
lowercase = max_len * 4
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , n_obs=__lowerCAmelCase , )
return ds, max_tokens, tokenizer
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase , lowercase = self._get_dataset()
lowercase = set(DistributedSortishSampler(__lowerCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCAmelCase ) )
lowercase = set(DistributedSortishSampler(__lowerCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCAmelCase ) )
assert idsa.intersection(__lowerCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase )
if tok_name == MBART_TINY:
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowercase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowercase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowerCAmelCase ) == 1 if tok_name == BART_TINY else len(__lowerCAmelCase ) == 0
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 32
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    """Image processor that resizes with a crop percentage, center-crops, rescales and normalizes."""

    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , crop_pct = 0.9 , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , crop_pct = None , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self , images , do_resize = None , size = None , crop_pct = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 84
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_mobilenet_v2''': [
        '''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileNetV2Config''',
        '''MobileNetV2OnnxConfig''',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilenet_v2'''] = ['''MobileNetV2FeatureExtractor''']
    _import_structure['''image_processing_mobilenet_v2'''] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilenet_v2'''] = [
        '''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileNetV2ForImageClassification''',
        '''MobileNetV2ForSemanticSegmentation''',
        '''MobileNetV2Model''',
        '''MobileNetV2PreTrainedModel''',
        '''load_tf_weights_in_mobilenet_v2''',
    ]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 257
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
def _lowercase ( self : List[str], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[int] ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__lowercase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F""" {len(UpperCAmelCase__ )} and {len(UpperCAmelCase__ )}""" )
return images_with_overflow
def _lowercase ( self : Optional[int], *UpperCAmelCase__ : List[str], **UpperCAmelCase__ : List[str] ):
return self.tokenizer.batch_decode(*UpperCAmelCase__, **UpperCAmelCase__ )
def _lowercase ( self : Optional[int], *UpperCAmelCase__ : Optional[Any], **UpperCAmelCase__ : List[Any] ):
return self.tokenizer.decode(*UpperCAmelCase__, **UpperCAmelCase__ )
@property
def _lowercase ( self : List[Any] ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowercase ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", UpperCAmelCase__, )
return self.image_processor_class
@property
def _lowercase ( self : List[Any] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", UpperCAmelCase__, )
return self.image_processor
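# Minimal usage sketch (added for illustration, not part of the original module).
# It assumes the public "microsoft/layoutxlm-base" checkpoint, a Tesseract install
# for the default apply_ocr=True behaviour, and a local "document.png":
#
#   from PIL import Image
#   from transformers import LayoutXLMProcessor
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.keys())  # e.g. input_ids, attention_mask, bbox, image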
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # "J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place (Dutch national flag problem)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[high], sequence[mid] = sequence[mid], sequence[high]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
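# A quick illustrative check (added; not in the original file): one pass groups the
# values into the 0s / 1s / 2s regions in place.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1, 1]) == [0, 0, 1, 1, 1, 2, 2]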
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string into its integer value."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of ``num``."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by rewriting each numeral minimally."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
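# Round-trip sanity check (added for illustration): "XIIIIII" (16 written
# inefficiently) parses to 16, which regenerates minimally as "XVI" — a saving of
# four characters, exactly the quantity solution() accumulates per line.
assert parse_roman_numerals("XIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"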
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
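# Example invocation (added for illustration; the script filename and output path
# are placeholders, not taken from the original source):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#       --push_to_hub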
__UpperCamelCase : Dict = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def __A ( ) -> None:
a = input("""Enter message: """ )
a = input("""Enter key [alphanumeric]: """ )
a = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
a = """encrypt"""
a = encrypt_message(__lowerCamelCase , __lowerCamelCase )
elif mode.lower().startswith("""d""" ):
a = """decrypt"""
a = decrypt_message(__lowerCamelCase , __lowerCamelCase )
print(f'\n{mode.title()}ed message:' )
print(__lowerCamelCase )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """encrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase ) -> str:
return translate_message(__lowerCamelCase , __lowerCamelCase , """decrypt""" )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str:
a = []
a = 0
a = key.upper()
for symbol in message:
a = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__lowerCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__lowerCamelCase ):
a = 0
else:
translated.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
if __name__ == "__main__":
main()
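# Illustrative round trip (added; not in the original file): encrypting and then
# decrypting with the same key recovers the message; non-letters pass through.
ciphertext = encrypt_message("LEMON", "Attack at dawn")
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn"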
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least value of M such that the number of cuboids with integer
    shortest-path length and dimensions up to M first exceeds ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
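# Sanity check added for illustration, using values from the Project Euler 86
# statement: M = 99 admits 1975 integer-shortest-path cuboids and M = 100 admits
# 2060, so the count first exceeds 1999 at M = 100.
assert solution(1999) == 100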
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
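# Illustrative note (added; not part of the module): with the `_LazyModule`
# registration above, submodules are imported only on first attribute access, e.g.
#
#   from transformers.models.wav2vec2 import Wav2Vec2Config  # cheap: config module only
#   from transformers.models.wav2vec2 import Wav2Vec2Model   # triggers the torch-backed import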
'''simple docstring'''
import os
def a__ ( a__ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(a__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(a__ )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )]
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , a__ ):
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a__ ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
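# Illustrative check (added; not in the original file): the 5x5 example matrix from
# the Project Euler 82 statement has a minimal path sum of 994. Passing an absolute
# path works because os.path.join discards the directory prefix for absolute paths.
if __name__ == "__main__":
    import tempfile

    example = (
        "131,673,234,103,18\n"
        "201,96,342,965,150\n"
        "630,803,746,422,111\n"
        "537,699,497,121,956\n"
        "805,732,524,37,331\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write(example)
    assert solution(tmp.name) == 994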
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
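# Illustrative pairing (added; the prompt text below is an assumption, not from
# this file): the converters above are meant to be passed as `convert_value`, e.g.
#
#   use_cpu = _ask_field(
#       "Do you want to run your training on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )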
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> import torch\n        >>> import numpy as np\n\n        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n        >>> from transformers import pipeline\n        >>> from diffusers.utils import load_image\n\n\n        >>> def make_hint(image, depth_estimator):\n        ...     image = depth_estimator(image)[\"depth\"]\n        ...     image = np.array(image)\n        ...     image = image[:, :, None]\n        ...     image = np.concatenate([image, image, image], axis=2)\n        ...     detected_map = torch.from_numpy(image).float() / 255.0\n        ...     hint = detected_map.permute(2, 0, 1)\n        ...     return hint\n\n\n        >>> depth_estimator = pipeline(\"depth-estimation\")\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n        ...     \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n        ... )\n        >>> pipe = pipe.to(\"cuda\")\n\n\n        >>> img = load_image(\n        ...     \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n        ...     \"/kandinsky/cat.png\"\n        ... ).resize((768, 768))\n\n        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n        >>> prompt = \"A robot, 4k photo\"\n        >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n        >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n        >>> image_emb, zero_image_emb = pipe_prior(\n        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n        ... ).to_tuple()\n\n        >>> images = pipe(\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     hint=hint,\n        ...     num_inference_steps=50,\n        ...     generator=generator,\n        ...     height=768,\n        ...     width=768,\n        ... ).images\n\n        >>> images[0].save(\"robot_cat.png\")\n        ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
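# Quick illustration (added; not part of the original file): with the default
# scale_factor of 8, a 512x512 pixel request maps to a 64x64 latent grid, since
# 512 // 8**2 = 8 and 8 * 8 = 64.
# >>> downscale_height_and_width(512, 512)
# (64, 64)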
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet guidance.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args,
        tokenizer,
        limit_length=None,
        mode=Split.train,
        is_language_sensitive=False,
        cache_dir=None,
        dataset_format="pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
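# Minimal usage sketch (added for illustration only; the checkpoint and data path
# are assumptions, not part of the original module):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#   train_dataset = SquadDataset(data_args, tokenizer, mode=Split.train)
#   print(train_dataset[0]["input_ids"].shape)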
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count the reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
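# Sanity check added for illustration (from the Project Euler 145 statement):
# there are exactly 120 reversible numbers below one thousand.
assert solution(3) == 120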
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
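if __name__ == "__main__":
    # Quick illustrative check (added; not part of the original utilities): with
    # a=2 and b=3 the model reproduces the dataset's generating function up to noise.
    dataset = RegressionDataset(length=8, seed=0)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=2, b=3)
    batch = next(iter(loader))
    print(model(batch["x"]))  # close to batch["y"]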
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
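# Illustrative input format (added; the concrete line below is a made-up example):
# each line of --correct_filename must be "file;class;test;correct_line", matching
# the four-way split(";") above, e.g.
#
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2, 0.3])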
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000 x 1000 grid sorted in decreasing order per row and column."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number using binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negative numbers, narrowing the search bound row by row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negative numbers with a full brute-force scan."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count the negative numbers, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
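# Quick illustrative check (added; not in the original file): all three counting
# strategies agree on the first test grid, which contains eight negatives.
example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert (
    count_negatives_binary_search(example)
    == count_negatives_brute_force(example)
    == count_negatives_brute_force_with_break(example)
    == 8
)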
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
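# Illustrative takeaway (a sketch, not one of the original tests): the processor fans
# `text=` out to the tokenizer and `images=` out to the image processor, so a single
# call such as
#
#   inputs = processor(text="lower newer", images=image_input, return_tensors="np")
#
# yields input_ids, token_type_ids, attention_mask and pixel_values in one BatchFeature.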
| 97
|
"""simple docstring"""
def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list:
'''simple docstring'''
_A = length or len(_snake_case )
_A = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_A , _A = list_data[i + 1], list_data[i]
_A = True
return list_data if not swapped else bubble_sort(_snake_case , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
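    # Quick demonstration (illustrative, not part of the original module): each
    # recursive pass bubbles the largest remaining element to the end.
    print(bubble_sort([7, 1, 4, 2]))  # -> [1, 2, 4, 7]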
| 315
| 0
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
# fmt: off
A : List[str] = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A,  # `A` is the expected-encoding dict literal kept verbatim above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
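    # Illustrative sketch (not one of the original tests): with the two-vocab tokenizer
    # above, source and target sides can be encoded in a single call:
    #
    #   batch = tokenizer("Tämä on testi", text_target="This is a test", return_tensors="pt")
    #   # batch.input_ids uses the source vocab; batch.labels uses the target vocab.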
| 256
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
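    # Tiny illustration (a sketch, not part of the original script) of the filter used in
    # `extract_warnings_from_single_artifact`: a buffered warning block survives only if
    # it names one of the requested warning categories.
    example = "src/example.py:12: DeprecationWarning: this helper is deprecated"
    assert any(f": {target}: " in example for target in ["DeprecationWarning", "UserWarning", "FutureWarning"])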
| 256
| 1
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
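# Illustrative usage (a sketch; needs a `DatasetInfo` whose `siblings` list the repo files):
#
#   fs = HfFileSystem(repo_info=repo_info, token=token)
#   fs.ls("")                          # top-level entries built from `repo_info.siblings`
#   with fs.open("data/train.csv") as f:
#       first_line = f.readline()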
| 160
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 318
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 334
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 334
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=3_2 , SCREAMING_SNAKE_CASE__ : str=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : List[str]=3_7 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Any=2 , ) -> str:
a_ : Union[str, Any] = parent
a_ : str = batch_size
a_ : List[Any] = image_size
a_ : int = patch_size
a_ : Optional[int] = num_channels
a_ : int = is_training
a_ : List[Any] = use_labels
a_ : Any = hidden_size
a_ : Optional[Any] = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : Optional[Any] = intermediate_size
a_ : Dict = hidden_act
a_ : Tuple = hidden_dropout_prob
a_ : Optional[int] = attention_probs_dropout_prob
a_ : Union[str, Any] = type_sequence_label_size
a_ : List[Any] = initializer_range
a_ : Union[str, Any] = scope
a_ : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
a_ : int = (image_size // patch_size) ** 2
a_ : List[str] = num_patches + 2
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
a_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Tuple = None
if self.use_labels:
a_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
a_ : Dict = DeiTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
a_ : str = DeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Any = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
a_ : List[str] = 1
a_ : List[Any] = DeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
a_ : Tuple = self.type_sequence_label_size
a_ : Dict = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : int = 1
a_ : Optional[int] = DeiTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
a_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : str = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : List[str] = DeiTModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Any = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : List[Any] = [*signature.parameters.keys()]
a_ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
a_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> Union[str, Any]:
a_ : List[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
if not self.model_tester.is_training:
return
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Optional[int] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
a_ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
a_ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a_ : Optional[Any] = False
a_ : Any = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
a_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
a_ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
a_ : Dict = model(**SCREAMING_SNAKE_CASE__ ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a_ : List[Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE__ ),
*get_values(SCREAMING_SNAKE_CASE__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
a_ : Optional[int] = problem_type['title']
a_ : Union[str, Any] = problem_type['num_labels']
a_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.train()
a_ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ )
if problem_type["num_labels"] > 1:
a_ : List[Any] = inputs['labels'].unsqueeze(1 ).repeat(1 , problem_type['num_labels'] )
a_ : Tuple = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE__ ) as warning_list:
a_ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Dict = DeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]:
"""simple docstring"""
a_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : Union[str, Any] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ).to(
SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.default_image_processor
a_ : List[Any] = prepare_img()
a_ : Any = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
a_ : Any = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
a_ : Tuple = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
a_ : str = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
a_ : str = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto' )
a_ : List[Any] = self.default_image_processor
a_ : int = prepare_img()
a_ : Dict = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
a_ : List[str] = inputs.pixel_values.to(SCREAMING_SNAKE_CASE__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
a_ : Tuple = model(SCREAMING_SNAKE_CASE__ )
| 32
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Union[str, Any] = ['''pixel_values''']
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : str = size if size is not None else {'shortest_edge': 2_5_6}
a_ : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
a_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = do_resize
a_ : Dict = size
a_ : Optional[Any] = resample
a_ : Optional[int] = do_center_crop
a_ : Dict = crop_size
a_ : int = do_rescale
a_ : int = rescale_factor
a_ : Tuple = do_normalize
a_ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a_ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray:
a_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
a_ : Tuple = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ )
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> np.ndarray:
a_ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> np.ndarray:
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> Union[str, Any]:
a_ : List[str] = do_resize if do_resize is not None else self.do_resize
a_ : Dict = size if size is not None else self.size
a_ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = resample if resample is not None else self.resample
a_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ : int = crop_size if crop_size is not None else self.crop_size
a_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ )
a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : Any = do_normalize if do_normalize is not None else self.do_normalize
a_ : str = image_mean if image_mean is not None else self.image_mean
a_ : Dict = image_std if image_std is not None else self.image_std
a_ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
a_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
a_ : str = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
a_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
a_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
a_ : List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
a_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
a_ : Tuple = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
| 32
| 1
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
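# Sanity illustration (a sketch, not in the original script) of what `_re_checkpoint`
# extracts from a markdown-style checkpoint reference in a config docstring:
assert _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)") == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]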
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 356
|
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
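# Illustrative extension (a sketch, not part of the original script): name each visible
# CUDA device in addition to counting them.
def list_devices() -> None:
    for index in range(torch.cuda.device_count()):
        print(f"cuda:{index} -> {torch.cuda.get_device_name(index)}")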
| 305
| 0
|
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a known card issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the given credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g. 6 x 2 = 12), add the digits of the product to get a single
        # digit (e.g. 12: 1 + 2 = 3, 15: 1 + 5 = 6). For any doubled value in
        # 10..18 this equals (digit % 10) + 1, which the two lines below compute.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
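
# Worked example (added): in "59" the doubled digit is 5 -> 10, which folds to
# 1 + 0 = 1 via the (digit % 10) + 1 shortcut; adding the untouched 9 gives 10,
# and 10 % 10 == 0, so "59" passes the Luhn checksum.
assert luhn_validation("59")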
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the given string is a valid credit card number."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
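
    # Added sanity checks (illustrative): the calls above print a success line
    # for the classic Visa test number and a length error for "32323".
    assert validate_credit_card_number("4111111111111111") is True
    assert validate_credit_card_number("32323") is False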
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
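
# Illustration (added): the linear layer returned above shares storage with the
# embedding, which is how the output projection gets tied to the input embedding.
#   emb = nn.Embedding(1000, 64)
#   proj = make_linear_from_emb(emb)
#   assert proj.weight.data_ptr() == emb.weight.data_ptr()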
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download root "." is an assumption added here so the
    # one-argument call in `convert_openai_whisper_to_tfms` works.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # `checkpoint_path` is a size alias such as "tiny.en": download it first.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
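
# Example invocation (illustrative script name and paths):
#   python convert_openai_whisper_to_hf.py --checkpoint_path tiny.en \
#       --pytorch_dump_folder_path ./whisper-tiny-en
# `--checkpoint_path` accepts either a local .pt file or one of the `_MODELS`
# aliases defined at the top of this script.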
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists)."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
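
# To run this module on its own (illustrative pytest invocation; the exact test
# path inside the transformers repo may differ):
#   python -m pytest tests/models/speecht5/test_feature_extraction_speecht5.py -q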
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = a  # alias the long literal above to a descriptive name

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
"""simple docstring"""
def lowerCamelCase ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
UpperCAmelCase : Union[str, Any] = generate_large_matrix()
UpperCAmelCase : Union[str, Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase ( _UpperCamelCase : Dict ) -> None:
'''simple docstring'''
assert all(row == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for row in grid )
assert all(list(_UpperCamelCase ) == sorted(_UpperCamelCase , reverse=_UpperCamelCase ) for col in zip(*_UpperCamelCase ) )
def lowerCamelCase ( _UpperCamelCase : Any ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[str] = (left + right) // 2
__UpperCAmelCase : Optional[int] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : int = mid + 1
else:
__UpperCAmelCase : str = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_UpperCamelCase )
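
# Illustrative checks (added): the first negative in [4, 3, 2, -1] is at index 3,
# and an all-non-negative row such as [1, 0] returns its length.
assert find_negative_index([4, 3, 2, -1]) == 3
assert find_negative_index([1, 0]) == 2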
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    An O(m log n) solution that uses binary search to find the boundary between
    non-negative and negative numbers in each row.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # Columns are sorted too, so the boundary can only move left as we go
        # down the rows; searching `grid[i][:bound]` keeps shrinking the range.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the number of negative values by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Like the brute-force solution above, but stops at the first negative
    number in each row since the rows are sorted."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting implementations against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
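
    # Added cross-check (illustrative): all three strategies agree on the
    # classic 4x4 example, which contains 8 negative numbers.
    example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(example)
        == count_negatives_brute_force_with_break(example)
        == count_negatives_brute_force(example)
        == 8
    )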
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by `check_over_configs`, which round-trips the config through save/load
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM is a multistep method: run the loop twice so the internal `ets`
        # history is fully populated.
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
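
# Minimal usage sketch (illustrative; `model` and `sample` are supplied by the
# caller): the scheduler consumes a predicted residual at each timestep and
# returns the sample for the next one.
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample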
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
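
# Illustration (added): with the default cosine transform the resulting betas
# start near zero and increase monotonically, capped at `max_beta`, e.g.
#   betas_for_alpha_bar(8)  # -> tensor of 8 increasing values, all <= 0.999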
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """The inverted scheduler of DDIM: each `step` adds noise instead of removing it."""

    order = 1
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Returns the input sample unchanged; no scaling is needed for this scheduler."""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" step value; in inversion this is t + step_ratio
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
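
# Usage sketch (illustrative; `unet` and `latents` come from the caller): DDIM
# inversion walks forward in noise level, so each `step` maps the sample at
# timestep t to the noisier sample at t + step_ratio.
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample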
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
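
# Effect of the lazy module above (illustrative): submodules are imported only
# when first accessed, so the line below triggers just `configuration_plbart`.
#   from transformers.models.plbart import PLBartConfig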
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero shot image classification pipeline using CLIP-like checkpoints: predicts the class of an image from a set
    of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
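
# Usage sketch (illustrative checkpoint name; any CLIP-like model works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["a photo of a cat", "a photo of a dog"])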
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(self, parent, batch_size=13, patch_size=2, max_length=24, num_mel_bins=16, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, frequency_stride=2, time_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
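        # Worked example with the defaults above (an illustrative aside, not upstream code):
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and time_out_dimension = (24 - 2) // 2 + 1 = 12,
        # so num_patches = 8 * 12 = 96 and seq_length = 96 + 2 = 98.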
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels
    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_case_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset")
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
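# A minimal usage sketch (hypothetical inputs; the checkpoint name is only an example):
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     encoding = processor(images=image, return_tensors="pt")  # OCR runs inside the image processor
#     # encoding now holds input_ids, bbox, attention_mask and pixel_values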
def count_inversions_bf(arr):
    """Count inversions with a brute-force O(n^2) double loop."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Count inversions in O(n log n) with a divide-and-conquer merge."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    p_sorted, inversion_p = count_inversions_recursive(p)
    q_sorted, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(p_sorted, q_sorted)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return merged, num_inversions
def _count_cross_inversions(p, q):
    """Merge two sorted arrays and count the inversions across the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
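# Worked example (illustrative, not part of the original module):
# _count_cross_inversions([1, 10], [2, 5]) merges to [1, 2, 5, 10] and returns 2 cross
# inversions, namely (10, 2) and (10, 5), since 10 in P exceeds both remaining elements of Q.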
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"
    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
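# Illustrative note (not upstream code): for the default task, AlbertOnnxConfig(config).inputs
# evaluates to an OrderedDict like {"input_ids": {0: "batch", 1: "sequence"}, ...}; these are
# the axes the ONNX exporter keeps symbolic so batch size and sequence length stay dynamic.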
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
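    # Worked example (illustrative aside): with the defaults above, cardinality falls back to [0],
    # so embedding_dimension becomes [0] and _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2;
    # feature_size is then 1 * len([1, 2, 3, 4, 5, 6, 7]) + 2 = 9.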
"""simple docstring"""
import operator as op
UpperCAmelCase = """scaler.pt"""
UpperCAmelCase = """pytorch_model"""
UpperCAmelCase = """random_states"""
UpperCAmelCase = """optimizer"""
UpperCAmelCase = """scheduler"""
UpperCAmelCase = """pytorch_model.bin"""
UpperCAmelCase = """pytorch_model.bin.index.json"""
UpperCAmelCase = """model.safetensors"""
UpperCAmelCase = """model.safetensors.index.json"""
UpperCAmelCase = """1.10.2"""
UpperCAmelCase = """py38"""
UpperCAmelCase = """4.17.0"""
UpperCAmelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
UpperCAmelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
UpperCAmelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
UpperCAmelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
UpperCAmelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
UpperCAmelCase = """2.0.1"""
UpperCAmelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
UpperCAmelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
UpperCAmelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]
UpperCAmelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
UpperCAmelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase = os.path.join(git_repo_path, """src""", """diffusers""")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")

        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_flax(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None)

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None)

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None)

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16)

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,)
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,)

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,)

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,)
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
A__ = "huggingface/label-files"
A__ = "imagenet-1k-id2label.json"
A__ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
A__ = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(),)

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record the characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic, so fix the seed before generating
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
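# Worked example (illustrative, assuming SI units): 1 mol at 300 K in 0.025 m^3 gives
# pressure_of_gas_system(1, 300, 0.025) = 1 * 300 * 8.314462 / 0.025, roughly 99773.5 Pa.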
if __name__ == "__main__":
from doctest import testmod
testmod()
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
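# Illustrative example (not part of the original script):
# split_text("a b c d", n=2) returns ["a b", "c d"], i.e. passages of two space-separated words.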
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features,)
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
# And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 333
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa: F401, F403
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 334
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16,
                 d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000,
                 same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True,
                 dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01,
                 init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
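# Minimal usage sketch (hypothetical values): the `attribute_map` above lets the generic
# config attribute names resolve to Transformer-XL's own hyperparameters:
# config = TransfoXLConfig(d_model=512, n_layer=12)
# assert config.hidden_size == 512 and config.num_hidden_layers == 12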
| 334
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxPipelineDownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny_pipe(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 371
|
"""simple docstring"""
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Returns the frequency of each possible total for `dice_number` dice with `sides_number` sides."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    """Returns Peter's probability of beating Colin, rounded to 7 decimal places."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
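# For reference, the published answer to Project Euler problem 205 (which the line above
# prints) is 0.5731441.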
| 79
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
|
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer, e.g. "XIV" -> 14."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generates the minimal roman numeral string for an integer, e.g. 14 -> "XIV"."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
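# Round-trip sketch: parse_roman_numerals("XIIII") == 14 and generate_roman_numerals(14) == "XIV",
# i.e. rewriting in minimal form saves two characters -- exactly what solution() below counts.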
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Counts the characters saved by rewriting each numeral in the file in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 85
| 1
|
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily picks items in descending `key_func` order while they fit under `max_cost`."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """Placeholder kept from the source; the original doctests were elided."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264
|
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
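# Usage sketch (ideal gas law with R = 0.0821 L*atm/(mol*K)):
# moles_to_pressure(volume=0.82, moles=3, temperature=300) == round(3 * 0.0821 * 300 / 0.82) == 90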
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
                 hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
                 num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
                 head_in_index=-1, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
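# Minimal usage sketch: instantiating with no arguments keeps the defaults above, e.g.
# GLPNConfig().hidden_sizes == [32, 64, 160, 256] and GLPNConfig().decoder_hidden_size == 64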
| 68
|
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance in metres between two points on Earth, using the haversine formula on a WGS84 ellipsoid."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
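# Usage sketch (hypothetical coordinates, San Francisco to New York City):
# print(f"{haversine_distance(37.774856, -122.424227, 40.713019, -74.012647):0,.0f} metres")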
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26
| 0
|
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Recursive depth-first topological sort of the global graph `edges`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)  # expected: ['c', 'd', 'e', 'b', 'a'] for the graph above
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 306
| 1
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Normalized escape-step count of the Mandelbrot iteration for the point (x, y)."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6,
              figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50,
              use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 6
|
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'''{price_plus_tax(100, 0.25) = }''')
print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 105
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_longt5 import (
        LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
        LongT5EncoderModel,
        LongT5ForConditionalGeneration,
        LongT5Model,
        LongT5PreTrainedModel,
    )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_longt5 import (
        FlaxLongT5ForConditionalGeneration,
        FlaxLongT5Model,
        FlaxLongT5PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 354
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 280
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
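# Minimal usage sketch (hypothetical PIL image; the checkpoint is downloaded on first use):
# tool = ImageQuestionAnsweringTool()
# answer = tool(image, "What is shown in the picture?")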
| 325
|
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))  # 10 choose 5 == 252
| 325
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10,
                 num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(depths=[1, 1, 1, 1]),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
def lowerCAmelCase ( self : Optional[Any]):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase : Optional[Any] = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.train()
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__).loss
loss.backward()
def lowerCAmelCase ( self : Optional[int]):
# only MaskFormerForInstanceSegmentation has the loss
__lowerCamelCase : Union[str, Any] = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : Any = True
__lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.train()
__lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE__ ,mask_labels=SCREAMING_SNAKE_CASE__ ,class_labels=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__lowerCamelCase : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1E-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = self.default_image_processor
__lowerCamelCase : Optional[Any] = prepare_img()
__lowerCamelCase : int = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
__lowerCamelCase : str = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
__lowerCamelCase : str = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(SCREAMING_SNAKE_CASE__)
.eval()
)
__lowerCamelCase : Optional[int] = self.default_image_processor
__lowerCamelCase : Optional[Any] = prepare_img()
__lowerCamelCase : int = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__)
# masks_queries_logits
__lowerCamelCase : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
__lowerCamelCase : Union[str, Any] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
__lowerCamelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
# class_queries_logits
__lowerCamelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__lowerCamelCase : str = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(SCREAMING_SNAKE_CASE__)
.eval()
)
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : Any = prepare_img()
__lowerCamelCase : List[Any] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__)
# masks_queries_logits
__lowerCamelCase : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
__lowerCamelCase : Any = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__lowerCamelCase : int = torch.tensor(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
# class_queries_logits
__lowerCamelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
__lowerCamelCase : Any = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE__ ,atol=SCREAMING_SNAKE_CASE__))
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : int = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(SCREAMING_SNAKE_CASE__)
.eval()
)
__lowerCamelCase : List[str] = self.default_image_processor
__lowerCamelCase : Union[str, Any] = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] ,segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.float32), np.zeros((3_8_4, 3_8_4)).astype(np.float32)] ,return_tensors='pt' ,)
__lowerCamelCase : Optional[int] = inputs['pixel_values'].to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = [el.to(SCREAMING_SNAKE_CASE__) for el in inputs['mask_labels']]
__lowerCamelCase : str = [el.to(SCREAMING_SNAKE_CASE__) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE__)
self.assertTrue(outputs.loss is not None)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
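# --- Usage sketch (my addition, not part of the original module) ---
# Shows how the restored helpers compose: `_ask_options` drives the interactive
# BulletMenu and feeds the chosen index through a converter. The prompt text
# below is illustrative, not taken from accelerate itself.
if __name__ == "__main__":
    mixed_precision = _ask_options(
        "Do you wish to use mixed precision?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    print(mixed_precision)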
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')
    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
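# Illustrative sanity check (my addition, not part of the original module):
# the first fifteen rounds, including the trailing space appended per entry.
if __name__ == "__main__":
    assert fizz_buzz(1, 15) == "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "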
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
 - **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
 - **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_encoder_decoder'''] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_speech_encoder_decoder'''] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
    '''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_deberta_fast'''] = ['''DebertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deberta'''] = [
        '''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''DebertaForMaskedLM''',
        '''DebertaForQuestionAnswering''',
        '''DebertaForSequenceClassification''',
        '''DebertaForTokenClassification''',
        '''DebertaModel''',
        '''DebertaPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deberta'''] = [
        '''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFDebertaForMaskedLM''',
        '''TFDebertaForQuestionAnswering''',
        '''TFDebertaForSequenceClassification''',
        '''TFDebertaForTokenClassification''',
        '''TFDebertaModel''',
        '''TFDebertaPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_beit'''] = ['''BeitFeatureExtractor''']
    _import_structure['''image_processing_beit'''] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_beit'''] = [
        '''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BeitForImageClassification''',
        '''BeitForMaskedImageModeling''',
        '''BeitForSemanticSegmentation''',
        '''BeitModel''',
        '''BeitPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_beit'''] = [
        '''FlaxBeitForImageClassification''',
        '''FlaxBeitForMaskedImageModeling''',
        '''FlaxBeitModel''',
        '''FlaxBeitPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __A ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self , vocab_file , spm_file , src_lang=None , tgt_lang=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , language_codes="m2m100" , sp_model_kwargs: Optional[Dict[str, Any]] = None , num_madeup_words=8 , **kwargs , ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] ) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self ) -> int:
        """simple docstring"""
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang(self ) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang(self , new_src_lang ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize(self , text ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id(self , token ) -> Union[str, Any]:
        """simple docstring"""
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token(self , index ) -> str:
        """simple docstring"""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string(self , tokens ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask(self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab(self ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__(self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self , d ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f'''{save_directory} should be a directory''' )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch(self , src_texts , src_lang = "en" , tgt_texts = None , tgt_lang = "ro" , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs(self , raw_inputs , src_lang , tgt_lang , **extra_kwargs ) -> Union[str, Any]:
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode(self ) -> Optional[Any]:
        """simple docstring"""
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode(self ) -> Tuple:
        """simple docstring"""
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self , src_lang ) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self , tgt_lang ) -> None:
        """simple docstring"""
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token(self , lang ) -> str:
        """simple docstring"""
        return self.lang_code_to_token[lang]
    def get_lang_id(self , lang ) -> int:
        """simple docstring"""
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm(path , sp_model_kwargs):
    """simple docstring"""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path):
    """simple docstring"""
    with open(path , '''r''') as f:
        return json.load(f)
def save_json(data , path):
    """simple docstring"""
    with open(path , '''w''') as f:
        json.dump(data , f , indent=2)
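# Usage sketch (my addition, not part of the original file): exercises the
# released checkpoint of the tokenizer implemented above; needs network access.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
    print(tokenizer("Life is like a box of chocolates.", return_tensors="pt")["input_ids"])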
from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE__ : Any = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def split_text(text , n=1_00 , character=" " ):
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents(documents ):
    titles , texts = [], []
    for title, text in zip(documents["""title"""] , documents["""text"""] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else """""" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed(documents , ctx_encoder , ctx_tokenizer ):
    input_ids = ctx_tokenizer(
        documents["""title"""] , documents["""text"""] , truncation=True , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments" , processing_args: "ProcessingArguments" , index_hnsw_args: "IndexHnswArguments" , ):
    logger.info("""Step 1 - Create the dataset""" )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("""Step 2 - Index the dataset""" )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("""embeddings""" , custom_index=index )
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
    dataset.get_index("""embeddings""" ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    '''simple docstring'''
    csv_path: str = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
    question: Optional[str] = field(
        default=None , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
    rag_model_name: str = field(
        default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
            '''help''': (
                '''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
                ''' \'facebook/dpr-ctx_encoder-multiset-base\''''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class ProcessingArguments:
    '''simple docstring'''
    num_proc: Optional[int] = field(
        default=None , metadata={
            '''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
        } , )
    batch_size: int = field(
        default=16 , metadata={
            '''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
        } , )
@dataclass
class IndexHnswArguments:
    '''simple docstring'''
    d: int = field(
        default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
    m: int = field(
        default=128 , metadata={
            '''help''': (
                '''The number of bi-directional links created for every new element during the HNSW index construction.'''
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
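# Retrieval sketch (my addition, not part of the original script): once the
# passages and the faiss index are saved, they can be queried roughly as below.
# The question-encoder checkpoint is an assumption (the script itself only uses
# the context encoder); `get_nearest_examples` is the datasets faiss API.
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#   dataset = load_from_disk(passages_path)
#   dataset.load_faiss_index("embeddings", index_path)
#   q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   question_emb = q_encoder(**q_tokenizer(rag_example_args.question, return_tensors="pt"))[0][0].numpy()
#   scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=2)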
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["input_ids", "attention_mask"]
a__ = None
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
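# Usage sketch (my addition, not part of the original file): exercises the fast
# tokenizer above through a released checkpoint; needs network access.
if __name__ == "__main__":
    from transformers import BloomTokenizerFast
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    print(tok("Hello world")["input_ids"])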
"""simple docstring"""
import requests
_NEWS_API = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def fetch_bbc_news(bbc_news_api_key ) -> None:
    """simple docstring"""
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''], 1 ):
        print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str ) -> None:
    """simple docstring"""
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F'''{round(-1 * my_fir_sum ):.1f}''' )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F'''{round(-1 * my_sec_sum ):.1f}''' )
    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def analyze_text(text: str ) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
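# Worked example (my addition): for the text "aab" the counted single-character
# distribution is {a: 2/3, b: 1/3}, so the first-order Shannon entropy is
# -(2/3)*log2(2/3) - (1/3)*log2(1/3) ≈ 0.92 bits. Note the original formatting
# applies round() before the .1f format, so calculate_prob prints 1.0 here.
if __name__ == "__main__":
    calculate_prob("aab")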
'''simple docstring'''
def sum_digits(num: int ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"""{solution() = }""")
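# Background note (my addition): the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...], so the i-th coefficient is 2*i/3 when
# i % 3 == 0 and 1 otherwise -- exactly what e_cont computes above. As a sanity
# check, the 10th convergent of e is 1457/536 and 1 + 4 + 5 + 7 == 17.
if __name__ == "__main__":
    assert solution(10) == 17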
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float] ) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float] ) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
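# Worked example (my addition): for resistors of 10 and 20 ohms,
# parallel: 1 / (1/10 + 1/20) = 20/3 ≈ 6.67 ohms; series: 10 + 20 = 30 ohms.
if __name__ == "__main__":
    print(f"{resistor_parallel([10, 20]):.2f}")  # 6.67
    print(f"{resistor_series([10, 20]):.2f}")  # 30.00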
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
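# The replacement import the deprecation message points to (my addition):
# from diffusers import StableDiffusionControlNetPipeline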
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'size' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase_ , 'crop_size' ) )
def lowercase__ ( self : int ):
lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
# Initialize image_processing
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCAmelCase : Dict = image_processing(UpperCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
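# Usage sketch (not part of the original file): with the lazy structure above, importing
# the package is cheap, and a heavy submodule is only executed on first attribute access.
# The module path below assumes the standard transformers layout for this package.
#
#     import importlib
#     xglm = importlib.import_module("transformers.models.xglm")
#     config = xglm.XGLMConfig()  # this first access triggers the import of configuration_xglm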
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={"help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'},
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."},
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: alternately fine-tune and pseudo-label until early stopping."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        data_dir = data_dir_format(iteration)
        assert os.path.exists(data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)
        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()
        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
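# Example invocation (a sketch, not part of the original script; the module name and
# file paths are placeholders):
#
#     from selftraining import selftrain
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",
#         infer_file="data/infer.csv",
#         output_dir="output",
#         eval_file="data/eval.csv",
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=10,
#     )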
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration wrapper: copies an existing text config and adds the multimodal
    hidden size (and, optionally, the number of labels)."""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
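# Quick usage sketch (the checkpoint name assumes the public MobileBERT weights):
#
#     from transformers import MobileBertTokenizerFast
#
#     tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     pair = tok("what is MobileBERT?", "a compact BERT variant")
#     # token_type_ids are 0 for the first segment and 1 for the second, as built by
#     # create_token_type_ids_from_sequences above.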
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
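# Example run (a sketch; the script name and paths are placeholders, not from the
# original file):
#
#     python convert_nllb_moe_sharded_checkpoint.py \
#         --nllb_moe_checkpoint_path /weights/model_moe_54b/checkpoint_2_300000 \
#         --pytorch_dump_folder_path /weights/hf-converted-moe-54b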
'''simple docstring'''
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a word to the Baconian cipher using the table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher string; each letter is a block of five A/B characters."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
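# Round-trip sketch: each letter maps to a 5-character A/B block, so decoding walks the
# coded word in chunks of five.
#
#     >>> encode("hello")
#     'AABBBAABAAABABAABABAABBAB'
#     >>> decode(encode("hello"))
#     'hello'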
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        # Flag non-binary open(...) calls that do not pass an encoding.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        # Flag print(...) calls outside comments and docstrings.
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
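# Sketch of the expected input/output (the path below is a made-up cache entry):
#
#     extract_commit_hash(".../snapshots/0123456789abcdef0123456789abcdef01234567/unet/config.json")
#     # -> "0123456789abcdef0123456789abcdef01234567" (a 40-character hex string,
#     #    which REGEX_COMMIT_HASH accepts); paths without a snapshots segment return None.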
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
"the directory exists and can be written to."
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
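# For example (a sketch of the naming convention this helper implements):
#
#     _add_variant("diffusion_pytorch_model.bin", "fp16")
#     # -> "diffusion_pytorch_model.fp16.bin"
#     _add_variant("diffusion_pytorch_model.bin")  # no variant -> unchanged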
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and automatic resizing."""
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
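# A minimal usage sketch appended for illustration (not in the original module):
if __name__ == "__main__":
    # set, get, delete, and length behavior of the map defined above
    hash_map: HashMap[str, int] = HashMap(initial_block_size=4)
    hash_map["one"] = 1
    hash_map["two"] = 2
    assert hash_map["one"] == 1 and len(hash_map) == 2
    del hash_map["one"]
    assert "one" not in hash_map and len(hash_map) == 1
    print(hash_map)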
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__A =R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _SCREAMING_SNAKE_CASE :
def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
elif titles is None or texts is None:
lowerCamelCase_ = titles if texts is None else texts
return super().__call__(
lowercase , lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , )
        lowerCamelCase_ = titles if not isinstance(lowercase , str ) else [titles]
        lowerCamelCase_ = texts if not isinstance(lowercase , str ) else [texts]
        lowerCamelCase_ = len(lowercase )
        lowerCamelCase_ = questions if not isinstance(lowercase , str ) else [questions] * n_passages
assert len(lowercase ) == len(
            lowercase ), f'There should be as many titles as texts but got {len(lowercase )} titles and {len(lowercase )} texts.'
lowerCamelCase_ = super().__call__(lowercase , lowercase , padding=lowercase , truncation=lowercase )["input_ids"]
lowerCamelCase_ = super().__call__(lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase )["input_ids"]
lowerCamelCase_ = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase , lowercase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ = attention_mask
return self.pad(lowercase , padding=lowercase , max_length=lowercase , return_tensors=lowercase )
    def decode_best_spans( self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ) -> List[DPRSpanPrediction]:
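        # Rank the passages by relevance logit, then walk them in that order,
        # extracting the highest-scoring answer spans from each passage until
        # `num_spans` DPRSpanPrediction tuples have been collected.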
lowerCamelCase_ = reader_input["input_ids"]
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = reader_output[:3]
lowerCamelCase_ = len(lowercase )
lowerCamelCase_ = sorted(range(lowercase ) , reverse=lowercase , key=relevance_logits.__getitem__ )
lowerCamelCase_ = []
for doc_id in sorted_docs:
lowerCamelCase_ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ = sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ = len(lowercase )
lowerCamelCase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase , top_spans=lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase , start_index=lowercase , end_index=lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , lowercase , lowercase , lowercase , lowercase , ) -> List[DPRSpanPrediction]:
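        # Score every candidate span as start_logit + end_logit, sort candidates by
        # that score, then greedily keep the best spans that do not overlap a
        # previously chosen span, stopping once `top_spans` spans are selected.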
lowerCamelCase_ = []
for start_index, start_score in enumerate(lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        lowerCamelCase_ = sorted(lowercase , key=lambda x : x[1] , reverse=lowercase )
lowerCamelCase_ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
lowerCamelCase_ = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = DPRReaderTokenizer
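# A minimal usage sketch for the reader tokenizer above (assumption: it corresponds
# to transformers' DPRReaderTokenizerFast, paired with the DPRReader model; the
# question/passage strings are illustrative only):
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
#   print(predicted_spans[0].text)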
def dodecahedron_surface_area(edge):
    '''Return the surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge**2.'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    '''Return the volume of a regular dodecahedron: ((15 + (7 * sqrt(5))) / 4) * edge**3.'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive number.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
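# Sanity check of the closed-form values for edge = 1 (standard dodecahedron
# constants, quoted here as an illustrative example rather than from the source):
#   dodecahedron_surface_area(1) == 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457
#   dodecahedron_volume(1)       == (15 + 7 * sqrt(5)) / 4      ~= 7.6631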
def euclidean_distance_sqr( point_a, point_b ):
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
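# NOTE: euclidean_distance_sqr returns the *squared* distance on purpose; squaring
# is monotone for non-negative values, so comparisons stay valid and the single
# square root can be deferred to the very end of closest_pair_of_points.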
def column_based_sort( array, column=0 ):
    return sorted(array, key=lambda x : x[column] )
def dis_between_closest_pair( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Dict=float('inf' ) ):
for i in range(points_counts - 1 ):
for j in range(i + 1, lowerCAmelCase_ ):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j] )
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
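# Strip scan: with the strip sorted by y-coordinate, a standard packing argument
# bounds how many strip points can lie within the current minimum distance, so each
# point is compared against only a constant window of neighbours (6 here), keeping
# the scan linear in the strip size.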
def dis_between_closest_in_strip( lowerCAmelCase_ : str, lowerCAmelCase_ : Tuple, lowerCAmelCase_ : Union[str, Any]=float('inf' ) ):
for i in range(min(6, points_counts - 1 ), lowerCAmelCase_ ):
        for j in range(max(0, i - 6 ), i ):
__lowerCAmelCase = euclidean_distance_sqr(points[i], points[j] )
if current_dis < min_dis:
__lowerCAmelCase = current_dis
return min_dis
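# Classic divide-and-conquer closest pair: solve each half recursively, then scan
# the strip of points whose x-coordinate lies within the best distance found so far
# of the dividing line; the overall running time is O(n log n) for n points.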
def closest_pair_of_points_sqr( lowerCAmelCase_ : int, lowerCAmelCase_ : Any, lowerCAmelCase_ : List[Any] ):
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCAmelCase_, lowerCAmelCase_ )
# recursion
__lowerCAmelCase = points_counts // 2
__lowerCAmelCase = closest_pair_of_points_sqr(
lowerCAmelCase_, points_sorted_on_y[:mid], lowerCAmelCase_ )
__lowerCAmelCase = closest_pair_of_points_sqr(
lowerCAmelCase_, points_sorted_on_y[mid:], points_counts - mid )
__lowerCAmelCase = min(lowerCAmelCase_, lowerCAmelCase_ )
__lowerCAmelCase = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCAmelCase_ )
__lowerCAmelCase = dis_between_closest_in_strip(
lowerCAmelCase_, len(lowerCAmelCase_ ), lowerCAmelCase_ )
return min(lowerCAmelCase_, lowerCAmelCase_ )
def closest_pair_of_points( lowerCAmelCase_ : Dict, lowerCAmelCase_ : Dict ):
__lowerCAmelCase = column_based_sort(lowerCAmelCase_, column=0 )
__lowerCAmelCase = column_based_sort(lowerCAmelCase_, column=1 )
return (
closest_pair_of_points_sqr(
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
) ** 0.5
if __name__ == "__main__":
_snake_case : Dict = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return model
@property
    def dummy_unet_condition( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=1_0 , )
return model
@property
    def dummy_vqvae_and_unet( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
        __lowerCAmelCase = UNet2DModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = AudioDiffusionPipeline(vqvae=lowerCAmelCase_ , unet=self.dummy_unet , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , steps=4 , return_dict=lowerCAmelCase_ )
__lowerCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
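        # Second phase of the test: latent audio diffusion (AutoencoderKL + DDIM
        # scheduler), including round-tripping raw audio back through the pipeline
        # via start_step.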
__lowerCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__lowerCAmelCase = DDIMScheduler()
__lowerCAmelCase = self.dummy_vqvae_and_unet
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(raw_audio=lowerCAmelCase_ , generator=lowerCAmelCase_ , start_step=5 , steps=1_0 )
__lowerCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
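        # Third phase: the encoding-conditioned variant, driving a
        # UNet2DConditionModel with a random encoding tensor as the
        # cross-attention conditioning input.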
__lowerCAmelCase = self.dummy_unet_condition
__lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase_ , mel=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
np.random.seed(0 )
__lowerCAmelCase = torch.rand((1, 1, 1_0) )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , encoding=lowerCAmelCase_ )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) -> None:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(4_2 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ )
__lowerCAmelCase = output.audios[0]
__lowerCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__lowerCAmelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0