code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _A ( lowerCamelCase ):
return EnvironmentCommand()
def _A ( lowerCamelCase ):
return EnvironmentCommand(args.accelerate_config_file )
class __lowerCAmelCase ( _UpperCamelCase ):
@staticmethod
def _snake_case ( snake_case ) -> int:
"""simple docstring"""
a__ : Optional[int] = parser.add_parser("env" )
download_parser.set_defaults(func=snake_case )
download_parser.add_argument(
"--accelerate-config_file" , default=snake_case , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=snake_case )
def __init__( self , snake_case , *snake_case ) -> None:
"""simple docstring"""
a__ : List[str] = accelerate_config_file
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : Dict = "not installed"
if is_safetensors_available():
import safetensors
a__ : Optional[Any] = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
a__ : Union[str, Any] = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
a__ : int = "not installed"
a__ : int = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
a__ : int = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(snake_case ):
a__ : int = load_config_from_file(self._accelerate_config_file ).to_dict()
a__ : Union[str, Any] = (
"\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(snake_case , snake_case )
else F"""\t{accelerate_config}"""
)
a__ : str = "not installed"
a__ : int = "NA"
if is_torch_available():
import torch
a__ : Optional[int] = torch.__version__
a__ : int = torch.cuda.is_available()
a__ : str = "not installed"
a__ : int = "NA"
if is_tf_available():
import tensorflow as tf
a__ : int = tf.__version__
try:
# deprecated in v2.1
a__ : Any = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
a__ : int = bool(tf.config.list_physical_devices("GPU" ) )
a__ : str = "not installed"
a__ : Optional[Any] = "not installed"
a__ : Dict = "not installed"
a__ : Tuple = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
a__ : Optional[int] = flax.__version__
a__ : List[Any] = jax.__version__
a__ : Optional[Any] = jaxlib.__version__
a__ : Union[str, Any] = jax.lib.xla_bridge.get_backend().platform
a__ : int = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": F"""{safetensors_version}""",
"Accelerate version": F"""{accelerate_version}""",
"Accelerate config": F"""{accelerate_config_str}""",
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Tensorflow version (GPU?)": F"""{tf_version} ({tf_cuda_available})""",
"Flax version (CPU?/GPU?/TPU?)": F"""{flax_version} ({jax_backend})""",
"Jax version": F"""{jax_version}""",
"JaxLib version": F"""{jaxlib_version}""",
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(snake_case ) )
return info
@staticmethod
def _snake_case ( snake_case ) -> int:
"""simple docstring"""
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Optional[int] = t.shape[:no_batch_dims]
a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
# Get an ordered list of slices to perform
a__ : str = _get_minimal_slice_set(
lowerCamelCase , lowerCamelCase , lowerCamelCase , )
a__ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
if not (len(lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
a__ : str = None
if _out is not None:
a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
a__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a__ : str = 0
a__ : Any = prepped_outputs
for _ in range(lowerCamelCase ):
# Chunk the input
if not low_mem:
a__ : str = _select_chunk
else:
a__ : Tuple = partial(
_chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
# Run the layer on the chunk
a__ : Any = layer(**lowerCamelCase )
# Allocate space for the output
if out is None:
a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase , lowerCamelCase ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
assign(lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a__ : Dict = da[k]
assign(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a__ : Dict = xa
elif isinstance(lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a__ : Dict = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _A ( lowerCamelCase = True , *lowerCamelCase , **lowerCamelCase ):
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
a__ : str = False
if main_process_only:
a__ : Dict = PartialState().local_process_index == 0
return _tqdm(*lowerCamelCase , **lowerCamelCase , disable=lowerCamelCase )
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = """upernet"""
def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(snake_case , snake_case ):
a__ : Optional[int] = backbone_config.get("model_type" )
a__ : str = CONFIG_MAPPING[backbone_model_type]
a__ : str = config_class.from_dict(snake_case )
a__ : int = backbone_config
a__ : Optional[Any] = hidden_size
a__ : Optional[Any] = initializer_range
a__ : Tuple = pool_scales
a__ : Optional[Any] = use_auxiliary_head
a__ : Optional[Any] = auxiliary_loss_weight
a__ : Dict = auxiliary_in_channels
a__ : Optional[int] = auxiliary_channels
a__ : Any = auxiliary_num_convs
a__ : Any = auxiliary_concat_input
a__ : int = loss_ignore_index
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : Optional[Any] = self.backbone_config.to_dict()
a__ : List[Any] = self.__class__.model_type
return output
| 629 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = ShapEImgaImgPipeline
_UpperCamelCase : Any = ["""image"""]
_UpperCamelCase : List[str] = ["""image"""]
_UpperCamelCase : Any = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
_UpperCamelCase : Any = False
@property
def _snake_case ( self ) -> Any:
"""simple docstring"""
return 32
@property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
return 32
@property
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _snake_case ( self ) -> str:
"""simple docstring"""
return 8
@property
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
a__ : Optional[Any] = CLIPVisionModel(snake_case )
return model
@property
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : str = CLIPImageProcessor(
crop_size=224 , do_center_crop=snake_case , do_normalize=snake_case , do_resize=snake_case , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
a__ : Optional[int] = PriorTransformer(**snake_case )
return model
@property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[str] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
a__ : Optional[Any] = ShapERenderer(**snake_case )
return model
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : Optional[Any] = self.dummy_prior
a__ : List[Any] = self.dummy_image_encoder
a__ : Union[str, Any] = self.dummy_image_processor
a__ : Optional[int] = self.dummy_renderer
a__ : Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1_024 , prediction_type="sample" , use_karras_sigmas=snake_case , clip_sample=snake_case , clip_sample_range=1.0 , )
a__ : str = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _snake_case ( self , snake_case , snake_case=0 ) -> Optional[Any]:
"""simple docstring"""
a__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith("mps" ):
a__ : int = torch.manual_seed(snake_case )
else:
a__ : Optional[int] = torch.Generator(device=snake_case ).manual_seed(snake_case )
a__ : Optional[Any] = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Optional[int] = "cpu"
a__ : str = self.get_dummy_components()
a__ : Tuple = self.pipeline_class(**snake_case )
a__ : Any = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
a__ : Any = pipe(**self.get_dummy_inputs(snake_case ) )
a__ : Tuple = output.images[0]
a__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a__ : Optional[Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : int = torch_device == "cpu"
a__ : Optional[int] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=snake_case , relax_max_difference=snake_case , )
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : int = self.get_dummy_components()
a__ : Union[str, Any] = self.pipeline_class(**snake_case )
a__ : str = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
a__ : Union[str, Any] = 1
a__ : Optional[Any] = 2
a__ : Optional[Any] = self.get_dummy_inputs(snake_case )
for key in inputs.keys():
if key in self.batch_params:
a__ : Optional[Any] = batch_size * [inputs[key]]
a__ : Optional[Any] = pipe(**snake_case , num_images_per_prompt=snake_case )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Dict:
"""simple docstring"""
a__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
a__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
a__ : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
a__ : Optional[Any] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
a__ : int = torch.Generator(device=snake_case ).manual_seed(0 )
a__ : List[Any] = pipe(
snake_case , generator=snake_case , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(snake_case , snake_case )
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
SCREAMING_SNAKE_CASE__ : int = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE__ : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a__ : int = numpy_to_pil(lowerCamelCase )
return images
def _A ( lowerCamelCase ):
if images.ndim == 3:
a__ : Tuple = images[None, ...]
a__ : Dict = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
a__ : List[Any] = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
| 629 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _snake_case ( *snake_case , **snake_case ) -> int:
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
_UpperCamelCase : Optional[int] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _snake_case ( self , snake_case , snake_case , snake_case ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
a__ : Dict = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def _snake_case ( self , snake_case , snake_case ) -> Tuple:
"""simple docstring"""
a__ : int = vqa_pipeline(snake_case , top_k=1 )
self.assertEqual(
snake_case , [
[{"score": ANY(snake_case ), "answer": ANY(snake_case )}],
[{"score": ANY(snake_case ), "answer": ANY(snake_case )}],
] , )
@require_torch
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ : int = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
a__ : Union[str, Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a__ : Any = "How many cats are there?"
a__ : Any = vqa_pipeline(image=snake_case , question="How many cats are there?" , top_k=2 )
self.assertEqual(
snake_case , [{"score": ANY(snake_case ), "answer": ANY(snake_case )}, {"score": ANY(snake_case ), "answer": ANY(snake_case )}] )
a__ : Optional[int] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
snake_case , [{"score": ANY(snake_case ), "answer": ANY(snake_case )}, {"score": ANY(snake_case ), "answer": ANY(snake_case )}] )
@slow
@require_torch
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : int = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
a__ : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png"
a__ : List[str] = "How many cats are there?"
a__ : Optional[Any] = vqa_pipeline(image=snake_case , question=snake_case , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
a__ : Optional[int] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] )
a__ : Optional[Any] = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
pass
| 629 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
return name.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase )}-{split}"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
a__ : Union[str, Any] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
a__ : Any = os.path.join(lowerCamelCase , lowerCamelCase )
return F"""{filepath}*"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
a__ : List[str] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if shard_lengths:
a__ : List[str] = len(lowerCamelCase )
a__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase )]
if filetype_suffix:
a__ : Optional[Any] = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
a__ : Optional[int] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
| 629 | 1 |
def _A ( lowerCamelCase ):
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
a__ : Tuple = 4
a__ : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
a__ : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
(1_0_0_0, """M"""),
(9_0_0, """CM"""),
(5_0_0, """D"""),
(4_0_0, """CD"""),
(1_0_0, """C"""),
(9_0, """XC"""),
(5_0, """L"""),
(4_0, """XL"""),
(1_0, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def _A ( lowerCamelCase ):
a__ : str = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
a__ : List[Any] = 0
a__ : Any = 0
while place < len(lowerCamelCase ):
if (place + 1 < len(lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _A ( lowerCamelCase ):
a__ : str = []
for arabic, roman in ROMAN:
((a__) , (a__)) : Any = divmod(lowerCamelCase , lowerCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(lowerCamelCase )
if __name__ == "__main__":
    import doctest

    # Run any doctests in this module as a self-check; with no doctest
    # examples present this is a no-op.
    doctest.testmod()
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the CpmAnt model: submodules load on first
# attribute access so importing the package stays cheap.
SCREAMING_SNAKE_CASE__ : int = {
    """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
    """tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
# `_LazyModule` below expects this conventional name; the obfuscated binding
# above left `_import_structure` undefined, which raised NameError at import.
_import_structure = SCREAMING_SNAKE_CASE__
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols so the lazy module can resolve them
    # (previously this list was bound to a throwaway name and discarded).
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy driven by _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
from math import factorial
def _A ( lowerCamelCase , lowerCamelCase ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(lowerCamelCase ) // (factorial(lowerCamelCase ) * factorial(n - k ))
if __name__ == "__main__":
    # The combinations function is bound to `_A` in this file; the original
    # name `combinations` no longer exists and raised NameError here.
    print(
        """The number of five-card hands possible from a standard""",
        f'fifty-two card deck is: {_A(5_2, 5)}\n',
    )
    print(
        """If a class of 40 students must be arranged into groups of""",
        f'4 for group projects, there are {_A(4_0, 4)} ways',
        """to arrange them.\n""",
    )
    print(
        """If 10 teams are competing in a Formula One race, there""",
        f'are {_A(1_0, 3)} ways that first, second and',
        """third place can be awarded.""",
    )
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Wikipedia-style passage used as the fixture context for the
# text-question-answering tool tests defined below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the `text-question-answering` tool, local and remote.

    Fixes from the obfuscated original: setup assigned the tools to throwaway
    locals so `self.tool` / `self.remote_tool` were never bound; the context
    argument and `remote=` flag were the undefined name `snake_case`; several
    return annotations referenced unimported typing names.
    """

    def _snake_case ( self ) -> None:
        # Bind both the local and the remote variants of the tool.
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        # NOTE(review): `remote=True` reconstructed from the method pairing
        # below (a local-tool and a remote-tool test each) — confirm.
        self.remote_tool = load_tool("text-question-answering" , remote=True )

    def _snake_case ( self ) -> None:
        # Local tool, positional-argument call style.
        result = self.tool(SCREAMING_SNAKE_CASE__ , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> None:
        # Remote tool, positional-argument call style.
        result = self.remote_tool(SCREAMING_SNAKE_CASE__ , "What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> None:
        # Local tool, keyword-argument call style.
        result = self.tool(text=SCREAMING_SNAKE_CASE__ , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> None:
        # Remote tool, keyword-argument call style.
        result = self.remote_tool(text=SCREAMING_SNAKE_CASE__ , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(result , "launched the BigScience Research Workshop" )
| 629 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module logger for this script.
SCREAMING_SNAKE_CASE__ : str = logging.getLogger(__name__)
# Inference-only script: disable autograd globally.
torch.set_grad_enabled(False)
# Preferred compute device for the DPR encoder.
# NOTE(review): this rebinds the same obfuscated name that held the logger two
# lines above, so the logger binding is lost; later code that reads `logger`
# or `device` will raise NameError — the original presumably used those names.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
def _A ( lowerCamelCase , lowerCamelCase=100 , lowerCamelCase=" " ):
a__ : Union[str, Any] = text.split(lowerCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )]
def _A ( lowerCamelCase ):
a__ , a__ : Dict = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(lowerCamelCase ):
titles.append(title if title is not None else "" )
texts.append(lowerCamelCase )
return {"title": titles, "text": texts}
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Dict = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=lowerCamelCase , padding="longest" , return_tensors="pt" )["input_ids"]
a__ : Optional[Any] = ctx_encoder(input_ids.to(device=lowerCamelCase ) , return_dict=lowerCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
    # NOTE(review): the duplicate parameter names are a SyntaxError as written;
    # the body references `rag_example_args`, `processing_args` and
    # `index_hnsw_args`, which were presumably the original parameter names —
    # confirm and restore before running.
    # NOTE(review): `logger` and the `lowerCamelCase` references below (the
    # map callbacks, booleans and device) are bound to obfuscated names
    # elsewhere in this file; verify each one.
    ######################################
    logger.info("Step 1 - Create the dataset" )
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    a__ : List[Any] = load_dataset(
        "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    a__ : Tuple = dataset.map(lowerCamelCase , batched=lowerCamelCase , num_proc=processing_args.num_proc )
    # And compute the embeddings
    a__ : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowerCamelCase )
    a__ : List[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    a__ : Dict = Features(
        {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} )  # optional, save as float32 instead of float64 to save space
    a__ : Optional[Any] = dataset.map(
        partial(lowerCamelCase , ctx_encoder=lowerCamelCase , ctx_tokenizer=lowerCamelCase ) , batched=lowerCamelCase , batch_size=processing_args.batch_size , features=lowerCamelCase , )
    # And finally save your dataset
    a__ : Dict = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
    dataset.save_to_disk(lowerCamelCase )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path) # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset" )
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    a__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index("embeddings" , custom_index=lowerCamelCase )
    # And save the index
    a__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
    dataset.get_index("embeddings" ).save(lowerCamelCase )
    # dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __lowerCAmelCase :
    """Command-line arguments for building the example RAG knowledge dataset.

    Fixes from the obfuscated original: every field shared the undefined name
    `_UpperCamelCase` (so only one attribute survived and the defaults raised
    NameError); the field names are restored from how the main function reads
    them (`.csv_path`, `.dpr_ctx_encoder_model_name`, `.output_dir`).
    """

    # Tab-separated source file with "title" and "text" columns.
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    # Optional question to run through RAG after indexing.
    question: Optional[str] = field(
        default=None ,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" ,
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" ,
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    # Where the passages dataset and the faiss index are written.
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" ),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class __lowerCAmelCase :
    """Options controlling how the documents are processed.

    Field names restored from how the main function reads them
    (`.num_proc`, `.batch_size`); the obfuscated default `_UpperCamelCase`
    was undefined and is replaced by None.
    """

    # None means single-process `datasets.map`.
    num_proc: Optional[int] = field(
        default=None ,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    # Batch size for the DPR embedding pass.
    batch_size: int = field(
        default=16 ,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )
@dataclass
class __lowerCAmelCase :
    """Faiss HNSW index hyper-parameters.

    Field names restored from how the main function reads them
    (`index_hnsw_args.d`, `index_hnsw_args.m`).
    """

    # Dimension of the DPR embeddings fed to the index.
    d: int = field(
        default=768 ,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    # Number of bi-directional links per element during index construction.
    m: int = field(
        default=128 ,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    # NOTE(review): `logger` is undefined here — the module logger above was
    # bound to an obfuscated name that was immediately rebound; confirm and
    # restore the original `logger` binding.
    logger.setLevel(logging.INFO)
    # NOTE(review): the parsed dataclasses are all unpacked into one obfuscated
    # name, and `parser`, `rag_example_args`, `processing_args`,
    # `index_hnsw_args` and `main` referenced below are undefined; the original
    # presumably bound those names explicitly — confirm before running.
    SCREAMING_SNAKE_CASE__ : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temporary directory when no output dir was given.
        SCREAMING_SNAKE_CASE__ : List[Any] = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """End-to-end tests that TRANSFORMERS_OFFLINE=1 lets cached models load
    with network access disabled.

    Each test runs a small script in a subprocess whose socket module is
    monkey-patched to fail, and asserts on the subprocess result. Fixes from
    the obfuscated original: locals were assigned to throwaway names but read
    as `load`/`run`/`mock`/`cmd`/`env`/`result`; the
    `env["TRANSFORMERS_OFFLINE"]` assignments were destroyed; the
    `subprocess.run` keyword values and several typing annotations were the
    undefined name `snake_case`.
    """

    @require_torch
    def _snake_case ( self ) -> None:
        # Offline mode with a socket that raises RuntimeError on any use.
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task="fill-mask" , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> None:
        # Flaky-internet simulation: socket raises socket.error; the cached
        # files must still allow the load to succeed.
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task="fill-mask" , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> None:
        # Sharded checkpoint variant.
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> None:
        # In offline mode the pipeline cannot infer the task and must fail
        # with a clear error message.
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )

    @require_torch
    def _snake_case ( self ) -> None:
        # Models with remote code must also load offline once cached.
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
@require_torch
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the `text-to-speech` tool.

    Fixes from the obfuscated original: setup assigned the tool to a
    throwaway local so `self.tool` was never bound, and the return
    annotations referenced unimported typing names.
    """

    def _snake_case ( self ) -> None:
        # Bind and initialize the tool on the test instance.
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()

    def _snake_case ( self ) -> None:
        # With a fixed seed the first raw samples must be reproducible.
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )

    def _snake_case ( self ) -> None:
        # Same check again; the seed reset makes the output deterministic.
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the (deprecated) VAN model.
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
# `_LazyModule` below expects this conventional name; the obfuscated binding
# above left `_import_structure` undefined, which raised NameError at import.
_import_structure = SCREAMING_SNAKE_CASE__
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch-only symbols so the lazy module can resolve them
    # (previously this list was bound to a throwaway name and discarded).
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy driven by _import_structure.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
import warnings

# Kept only so legacy `from image_to_image import ...` imports keep working.
from diffusers import StableDiffusionImgaImgPipeline  # noqa F401

# Emit a deprecation notice whenever this legacy script module is imported.
warnings.warn(
    """The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
    """ StableDiffusionImg2ImgPipeline` instead."""
)
| 629 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
# Task template describing the extractive question-answering schema
# (question + context in, {text, answer_start} answers out).
# NOTE(review): `frozen=_UpperCamelCase` presumably was `frozen=True`, and the
# repeated `_UpperCamelCase` field names below shadow one another (the
# property at the end reads `self.question_column` etc., so the original
# field names were likely task / input_schema / label_schema /
# question_column / context_column / answers_column) — confirm.
@dataclass(frozen=_UpperCamelCase )
class __lowerCAmelCase ( _UpperCamelCase ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    _UpperCamelCase : str = field(default="""question-answering-extractive""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
    # Expected input columns: a question string and a context string.
    _UpperCamelCase : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
    # Expected label column: answer texts plus their start offsets in context.
    _UpperCamelCase : ClassVar[Features] = Features(
        {
            """answers""": Sequence(
                {
                    """text""": Value("""string""" ),
                    """answer_start""": Value("""int32""" ),
                } )
        } )
    _UpperCamelCase : str = "question"
    _UpperCamelCase : str = "context"
    _UpperCamelCase : str = "answers"

    @property
    def _snake_case ( self ) -> Dict[str, str]:
        """Map the dataset's column names to the template's canonical names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Checkpoint name -> OpenAI download URL; the SHA256 of each file is embedded
# as the second-to-last path component of its URL.
# NOTE(review): the conversion code below reads the name `_MODELS`, which this
# obfuscated binding does not provide — confirm the intended name.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
# Substring mapping from OpenAI Whisper state-dict key fragments to the
# HF WhisperForConditionalGeneration naming scheme; applied cumulatively,
# one fragment at a time, by the key-renaming function below.
# NOTE(review): that function reads the name `WHISPER_MAPPING`, which this
# obfuscated binding does not provide — confirm the intended name.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( url , root ):
    """Download *url* into directory *root* and return the raw bytes,
    verifying the SHA256 checksum embedded in the URL.

    Reuses an existing file when its checksum matches. Fixes from the
    obfuscated original: duplicate parameter names (SyntaxError), locals
    assigned to throwaway names but read under their real names, the
    nonexistent `hashlib.shaaaa` attribute, and `unit_scale` bound to an
    undefined name.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    # The checksum is the second-to-last path component of the download URL.
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
    return model_bytes
def _A ( lowerCamelCase , lowerCamelCase ):
    # NOTE(review): the duplicate parameter names are a SyntaxError as written;
    # the body reads `checkpoint_path`-style values and the module entry point
    # passes (checkpoint_path, pytorch_dump_folder_path) — restore those names.
    # NOTE(review): `_MODELS`, `original_checkpoint`, `state_dict`,
    # `dimensions`, `tie_embeds`, `proj_out_weights` and `missing` below are
    # read under names the obfuscated `a__` assignments never bind; verify
    # each reference against the upstream conversion script.
    if ".pt" not in checkpoint_path:
        a__ : str = _download(_MODELS[checkpoint_path] )
    else:
        a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
    a__ : Dict = original_checkpoint["dims"]
    a__ : Optional[int] = original_checkpoint["model_state_dict"]
    # Keep the decoder token embedding: it becomes the output projection below.
    a__ : Any = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(lowerCamelCase )
    rename_keys(lowerCamelCase )
    a__ : Optional[Any] = True
    a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    # NOTE(review): `decoder_attention_heads=dimensions["n_text_state"]` looks
    # wrong — the head count field is presumably "n_text_head"; confirm.
    a__ : Tuple = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
    a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
    a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
    # Only the sinusoidal position tables are allowed to be absent.
    if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        a__ : str = proj_out_weights
    model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # Parse CLI arguments and run the conversion. The obfuscated original
    # bound the parser/args to one throwaway name and then read the
    # undefined names `parser`, `args` and `convert_openai_whisper_to_tfms`;
    # the conversion entry point is bound to `_A` in this file.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    _A(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
import random
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : List[Any] = a[left_index]
a__ : Dict = left_index + 1
for j in range(left_index + 1 , lowerCamelCase ):
if a[j] < pivot:
a__ , a__ : Dict = a[i], a[j]
i += 1
a__ , a__ : int = a[i - 1], a[left_index]
return i - 1
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if left < right:
a__ : Union[str, Any] = random.randint(lowerCamelCase , right - 1 )
a__ , a__ : Union[str, Any] = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
a__ : int = partition(lowerCamelCase , lowerCamelCase , lowerCamelCase )
quick_sort_random(
lowerCamelCase , lowerCamelCase , lowerCamelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCamelCase , pivot_index + 1 , lowerCamelCase ) # recursive quicksort to the right of the pivot point
def _A ( ):
    """Interactive entry point: read comma-separated numbers, sort, print."""
    # NOTE(review): the obfuscated locals below are assigned to throwaway
    # names but read as `user_input` / `lowerCamelCase`, and
    # `quick_sort_random` / `main` are undefined in this file (every function
    # was renamed `_A`); the original presumably bound `user_input` and the
    # parsed list, used `item` in the comprehension, and called the sibling
    # sort function — confirm and restore before running.
    a__ : str = input("Enter numbers separated by a comma:\n" ).strip()
    a__ : str = [int(lowerCamelCase ) for item in user_input.split("," )]
    quick_sort_random(lowerCamelCase , 0 , len(lowerCamelCase ) )
    print(lowerCamelCase )


if __name__ == "__main__":
    main()
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
# NOTE(review): this rebinds the same obfuscated name that held the logger on
# the line above, so the logger binding is lost; the original presumably used
# distinct names (`logger`, `INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`).
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for an Informer time-series transformer model.

    Stores prediction/context lengths, feature cardinalities and the
    encoder/decoder transformer hyper-parameters.

    NOTE(review): identifiers in this file appear machine-mangled (`a__`,
    `snake_case`); assignments below bind throwaway locals rather than
    `self.<attr>` — verify against the upstream InformerConfig.
    """

    # model-type tag used by auto-config machinery
    _UpperCamelCase : Optional[Any] = """informer"""
    # maps the generic attribute names onto this config's field names
    _UpperCamelCase : Any = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
        """Build the config; time-series fields first, then the transformer architecture."""
        # --- time-series specific fields ---
        a__ : Optional[Any] = prediction_length
        # context defaults to the prediction length when not given
        a__ : Optional[int] = context_length or prediction_length
        a__ : Optional[int] = distribution_output
        a__ : str = loss
        a__ : Optional[Any] = input_size
        a__ : int = num_time_features
        # default lag indices cover one week of history
        a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        a__ : Optional[int] = scaling
        a__ : List[str] = num_dynamic_real_features
        a__ : Optional[int] = num_static_real_features
        a__ : Optional[int] = num_static_categorical_features
        # set cardinality — one entry per static categorical feature
        if cardinality and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            a__ : List[Any] = cardinality
        else:
            a__ : Tuple = [0]
        # set embedding_dimension — derived from cardinality when absent (capped at 50)
        if embedding_dimension and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            a__ : Tuple = embedding_dimension
        else:
            a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        a__ : Optional[Any] = num_parallel_samples
        # Transformer architecture configuration
        # encoder input width = lagged copies of the input plus the extra features
        a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
        a__ : Union[str, Any] = d_model
        a__ : Any = encoder_attention_heads
        a__ : Optional[Any] = decoder_attention_heads
        a__ : int = encoder_ffn_dim
        a__ : List[Any] = decoder_ffn_dim
        a__ : List[str] = encoder_layers
        a__ : Any = decoder_layers
        a__ : List[str] = dropout
        a__ : int = attention_dropout
        a__ : List[Any] = activation_dropout
        a__ : Optional[int] = encoder_layerdrop
        a__ : Tuple = decoder_layerdrop
        a__ : Any = activation_function
        a__ : Tuple = init_std
        a__ : Optional[int] = use_cache
        # Informer-specific: ProbSparse attention settings
        a__ : Union[str, Any] = attention_type
        a__ : List[str] = sampling_factor
        a__ : Optional[int] = distil
        # super().__init__ is called last so all fields exist before validation
        super().__init__(is_encoder_decoder=snake_case , **snake_case )
    @property
    def _snake_case ( self ) -> int:
        """Number of extra (non-lagged) input features fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _A(flax_key_tuple, flax_tensor):
    """Map a Flax parameter key/value onto the PyTorch naming and layout.

    - 3-D ``kernel`` (expert layer): rename to ``weight`` and permute to (0, 2, 1).
    - 2-D ``kernel`` (linear layer): rename to ``weight`` and transpose.
    - ``scale`` / ``embedding``: rename to ``weight``, tensor unchanged.

    Returns the (possibly) renamed key tuple and converted tensor.

    Fixes: the original reused one parameter name twice (a SyntaxError).
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer (the join() guard is always truthy for non-empty keys;
        # kept as-is from the original conversion script)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def _A(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key parts, content).

    ``layer`` is a '/'-joined key from the flattened t5x checkpoint tree;
    ``checkpoint_info`` maps such keys to their stored values. kvstore paths are
    resolved relative to ``switch_checkpoint_path``.

    Fixes: the original reused one parameter name three times (a SyntaxError).
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing '/'
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # stored path is relative to the checkpoint directory
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def _A(current_block, save_path):
    """Rename a shard's keys and save it with ``torch.save``.

    Applies the model-specific ``rename_keys`` mapping, then converts the
    remaining '/'-separated flax paths into '.'-separated PyTorch state-dict
    keys before writing the block to ``save_path``.

    Fixes: the original reused one parameter name twice (a SyntaxError) and
    dropped the key rewrite into throwaway locals; the '/' -> '.' mapping is
    restored from the upstream conversion script — confirm against it.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = WEIGHTS_NAME ):
    """Stream a t5x Switch checkpoint from tensorstore and shard it into .bin files.

    Reads the flattened optimizer target, converts each weight to a torch
    tensor, renames keys to the PyTorch convention, and starts a new shard
    whenever the running byte size would exceed ``max_shard_size``. Returns
    (metadata, index) in the safetensors/transformers sharded-index format.

    NOTE(review): this file appears machine-mangled — the ``def`` line repeats
    one parameter name five times (a SyntaxError) and results are bound to
    throwaway ``a__`` locals while later lines read the intended names
    (``checkpoint_info``, ``current_block`` ...). Kept byte-identical here;
    restore from the upstream convert_big_switch.py before running.
    """
    a__ : Dict = convert_file_size_to_int(lowerCamelCase )
    a__ : Tuple = []
    a__ : str = {}
    a__ : Tuple = 0
    a__ : Union[str, Any] = 0
    os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        # msgpack-restore the t5x train state and keep only the parameters
        a__ : List[Any] = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        a__ : List[str] = flatten_dict(lowerCamelCase , sep="/" )
    a__ : List[str] = {}
    for layer in checkpoint_info.keys():
        a__ , a__ , a__ : Union[str, Any] = get_key_and_tensorstore_dict(
            lowerCamelCase , lowerCamelCase , lowerCamelCase )
        if curr_real_layer_name in all_layers:
            a__ : List[Any] = content
        else:
            a__ : List[Any] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        a__ : int = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        a__ : Tuple = torch.tensor(lowerCamelCase )
        a__ : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        a__ , a__ : Tuple = rename_base_flax_keys(tuple(key.split("/" ) ) , lowerCamelCase )
        a__ : str = "/".join(lowerCamelCase )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            a__ : Any = os.path.join(
                lowerCamelCase , weights_name.replace(".bin" , F"""-{len(lowerCamelCase )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(lowerCamelCase , lowerCamelCase )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            a__ : Union[str, Any] = {}
            a__ : Dict = 0
        a__ : Any = raw_weights.to(getattr(lowerCamelCase , lowerCamelCase ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    a__ : int = os.path.join(lowerCamelCase , weights_name.replace(".bin" , F"""-{len(lowerCamelCase )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(lowerCamelCase , lowerCamelCase )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(lowerCamelCase ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    a__ : str = {}
    a__ : Dict = {}
    for idx, shard in enumerate(lowerCamelCase ):
        # rename the placeholder '???' shard count now that it is known
        a__ : List[str] = weights_name.replace(
            ".bin" , F"""-{idx+1:05d}-of-{len(lowerCamelCase ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        a__ : List[Any] = os.path.join(lowerCamelCase , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(lowerCamelCase , os.path.join(lowerCamelCase , lowerCamelCase ) )
        a__ : Tuple = shard
        for key in shard:
            a__ : List[str] = shard_file
    # Add the metadata
    a__ : Union[str, Any] = {"total_size": total_size}
    a__ : List[str] = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(lowerCamelCase , lowerCamelCase ) , "w" , encoding="utf-8" ) as f:
        a__ : Optional[int] = json.dumps(lowerCamelCase , indent=2 , sort_keys=lowerCamelCase ) + "\n"
        f.write(lowerCamelCase )
    return metadata, index
if __name__ == "__main__":
    # CLI entry point: shard a t5x Switch Transformers checkpoint into
    # PyTorch .bin files. Fixes vs. the original: the parser/args results were
    # bound to a throwaway name, and the attribute was misspelled
    # `switch_tax_checkpoint_path` (argparse exposes `switch_t5x_checkpoint_path`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--switch_t5x_checkpoint_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
    parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def _A ( ):
    """Smoke-test: load a converted switch-base-8 model and run one generation.

    NOTE(review): identifiers look machine-mangled — the config is bound to
    ``a__`` but the next line reads ``config`` (NameError at runtime), and
    ``TaTokenizer`` is presumably ``T5Tokenizer``; confirm before use.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    a__ : Tuple = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    a__ : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    a__ : List[str] = TaTokenizer.from_pretrained("t5-small" )
    a__ : List[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    a__ : int = tokenizer(lowerCamelCase , return_tensors="pt" ).input_ids
    a__ : Union[str, Any] = model.generate(lowerCamelCase , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny reference checkpoints used to keep the tests fast.
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for make_student.create_student_by_copying_alternating_layers.

    NOTE(review): this file appears machine-mangled — every method is named
    ``_snake_case`` (later defs shadow earlier ones) and the cached property
    passes the undefined name ``snake_case`` to ``from_pretrained``; confirm
    against the upstream test module.
    """

    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Teacher config fetched once per test class."""
        return AutoConfig.from_pretrained(snake_case )
    def _snake_case ( self ) -> Any:
        """Student with a single encoder/decoder layer keeps 1 hidden layer."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _snake_case ( self ) -> str:
        """Passing d=None should fall back to the teacher's decoder depth."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
    def _snake_case ( self ) -> List[str]:
        """Decoder depth defaults to the teacher's when only e is reduced."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _snake_case ( self ) -> Optional[int]:
        """Explicit e=1, d=1 shrinks both stacks."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _snake_case ( self ) -> int:
        """Requesting no layers at all must raise."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _A(model_card_dir, src_lang, tgt_lang):
    """Generate the FSMT wmt19 model card README.md under ``model_card_dir``.

    ``src_lang``/``tgt_lang`` are two-letter codes ("en", "ru", "de"); the
    BLEU scores table and usage snippet are filled in from the static tables
    below. Fixes: the original reused one parameter name three times (a
    SyntaxError) and never bound the ``texts``/``scores``/``pair``/``path``
    names the template and I/O code read.
    """
    # sample sentences shown in the usage snippet, keyed by language code
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR's WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"""Generating {path}""")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
# Fixes vs. the original: the resolved paths were bound to throwaway names
# while the loop read `repo_dir` / `model_cards_dir`, and the split result
# never bound `src_lang` / `tgt_lang`.
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model names look like "wmt19-<src>-<tgt>"
    prefix, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Pipeline that runs the same prompt through SD v1.1–v1.4 for comparison.

    NOTE(review): this file appears machine-mangled — the four loaded
    pipelines are bound to throwaway ``a__`` locals (so ``self.pipea`` is
    never set) and every text2img method shares the name ``_snake_case``;
    confirm against the upstream community pipeline before use.
    """

    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Load the four v1.x checkpoints and register them as modules."""
        # NOTE(review): `super()._init_()` is almost certainly meant to be
        # `super().__init__()` — as written it raises AttributeError.
        super()._init_()
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Public (non-underscore) config entries as a plain dict."""
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; "auto" halves the attention head size."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def _snake_case ( self ) -> Tuple:
        """Disable attention slicing (delegates with slice_size=None)."""
        self.enable_attention_slicing(snake_case )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Run the v1.1 pipeline with the standard SD text2img arguments."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the v1.2 pipeline with the standard SD text2img arguments."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Run the v1.3 pipeline with the standard SD text2img arguments."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the v1.4 pipeline with the standard SD text2img arguments."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through all four checkpoints and bundle the outputs."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
from manim import *
class __lowerCAmelCase ( _UpperCamelCase ):
    """Manim scene: animates loading an empty model skeleton into CPU memory.

    NOTE(review): this file appears machine-mangled — the layout-direction
    arguments (RIGHT/DOWN/UP in the upstream scene) are all replaced by the
    undefined name ``snake_case``; confirm against the accelerate docs repo.
    """

    def _snake_case ( self ) -> Optional[Any]:
        """Build CPU/GPU/Model rectangles, then animate empty-weight placement."""
        # one memory cell and its inner (fill) rectangle
        a__ : Any = Rectangle(height=0.5 , width=0.5 )
        a__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells
        a__ : List[str] = [mem.copy() for i in range(6 )]
        a__ : Tuple = [mem.copy() for i in range(6 )]
        a__ : Optional[Any] = VGroup(*snake_case ).arrange(snake_case , buff=0 )
        a__ : Any = VGroup(*snake_case ).arrange(snake_case , buff=0 )
        a__ : Optional[Any] = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
        a__ : Union[str, Any] = Text("CPU" , font_size=24 )
        a__ : Tuple = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(snake_case )
        # GPU block: a single cell
        a__ : int = [mem.copy() for i in range(1 )]
        a__ : Optional[int] = VGroup(*snake_case ).arrange(snake_case , buff=0 )
        a__ : Optional[Any] = Text("GPU" , font_size=24 )
        a__ : Union[str, Any] = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
        gpu.align_to(snake_case , snake_case )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(snake_case )
        # Model block: a row of six cells
        a__ : Dict = [mem.copy() for i in range(6 )]
        a__ : str = VGroup(*snake_case ).arrange(snake_case , buff=0 )
        a__ : Tuple = Text("Model" , font_size=24 )
        a__ : Any = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) , )
        # caption and legend
        a__ : List[Any] = MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        a__ : Tuple = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        a__ : Optional[Any] = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(snake_case , run_time=2.5 ) , Write(snake_case ) , Write(snake_case ) )
        self.add(snake_case )
        # animate each model cell turning into a "loaded" cell inside the CPU
        a__ : int = []
        a__ : Any = []
        a__ : str = []
        for i, rect in enumerate(snake_case ):
            a__ : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 )
            cpu_target.move_to(snake_case )
            cpu_target.generate_target()
            a__ : Optional[int] = 0.46 / 4
            a__ : Dict = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case , buff=0.0 )
            cpu_targs.append(snake_case )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case ) )
            second_animations.append(MoveToTarget(snake_case , run_time=1.5 ) )
        self.play(*snake_case )
        self.play(*snake_case )
        self.wait()
| 629 |
# Standard gravity in m/s^2; also the default for the `gravity` parameter below.
# (The original bound this constant to a throwaway name while the default
# parameter read the undefined name `g`.)
g = 9.80665


def _A(fluid_density, volume, gravity=g):
    """Archimedes' buoyant force: fluid_density * gravity * volume (newtons).

    Raises ValueError for non-positive density/gravity or negative volume.
    Fixes: the original reused one parameter name three times (a SyntaxError).
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Table Transformer sub-package. The torch-only
# modeling symbols are registered only when torch is available. Fixes vs. the
# original: the structure was bound to a throwaway name while _LazyModule read
# the undefined `_import_structure`, and the lazy module was never installed
# into sys.modules.
_import_structure = {
    """configuration_table_transformer""": [
        """TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """TableTransformerConfig""",
        """TableTransformerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        """TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TableTransformerForObjectDetection""",
        """TableTransformerModel""",
        """TableTransformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazily-importing proxy
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A(root, value):
    """Split the treap at `value` into (left, right) subtrees.

    `left` holds every node with value < `value`; `right` holds the rest.
    Fixes vs. the original: duplicate parameter names (a SyntaxError) and
    recursive results dropped into throwaway locals instead of re-linking
    the children.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # root (and its right subtree) belong to the right half
            left, root.left = _A(root.left, value)
            return left, root
        else:
            # root (and its left subtree) belong to the left half
            root.right, right = _A(root.right, value)
            return root, right
def _A(left, right):
    """Merge two treaps where every value in `left` is < every value in `right`.

    The node with the smaller priority becomes the root (min-heap on prior).
    Fixes vs. the original: duplicate parameter names (a SyntaxError) and the
    recursive merge result dropped into a throwaway local instead of being
    attached as a child.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = _A(left.right, right)
        return left
    else:
        right.left = _A(left, right.left)
        return right
def _A(root, value):
    """Insert `value` into the treap: split at value, merge the new node between.

    Fixes vs. the original: duplicate parameter names (a SyntaxError) and the
    split results dropped into one throwaway local. Note: `Node`, `split` and
    `merge` must be in scope (the defs in this file are mangled to other names).
    """
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def _A(root, value):
    """Erase every node holding `value`: split it out, merge the remainder.

    Fixes vs. the original: duplicate parameter names (a SyntaxError) and the
    split/merge results dropped into throwaway locals. Note: `split` and
    `merge` must be in scope (the defs in this file are mangled to other names).
    """
    left, right = split(root, value - 1)  # left: values < value
    _, right = split(right, value)  # discard the nodes equal to value
    return merge(left, right)
def _A(root):
    """Print the treap's values in sorted (in-order) order, comma-separated.

    Fixes vs. the original: the parameter was named `lowerCamelCase` while the
    body read `root`. Note: the recursive calls target `inorder`, the name used
    in the upstream source (the defs in this file are mangled to other names).
    """
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)
def _A(root, args):
    """Apply whitespace-separated commands to the treap and return the new root.

    "+N" inserts N, "-N" erases N; anything else prints an error.
    Fixes vs. the original: duplicate parameter names (a SyntaxError) and the
    updated roots dropped into throwaway locals. Note: `insert`/`erase` must
    be in scope (the defs in this file are mangled to other names).
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def _A ( ):
    """Interactive REPL: read treap commands from stdin until 'q' is entered."""
    # NOTE(review): identifiers look machine-mangled — the input is bound to
    # `a__` but the loop reads `args`, and `interact_treap` is never defined
    # under that name in this file; confirm against the upstream source.
    a__ : List[str] = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    a__ : int = input()
    while args != "q":
        a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
        print(lowerCamelCase )
        a__ : Optional[Any] = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> list of public names.  The
# original bound this to a throwaway name while passing ``_import_structure``
# (undefined) to ``_LazyModule`` below, which raised NameError on import.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras present: expose the feature extractor and image processor.
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced with a lazy proxy so heavy dependencies load on first access.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast, CPU-sized tests for ``StableUnCLIPPipeline``.

    NOTE(review): identifiers in this class look machine-mangled
    (``__lowerCAmelCase``/``_UpperCamelCase``/``a__``/``snake_case``); several
    names read below (``embedder_hidden_size``, ``prior_tokenizer``, ...) are
    never bound under those names, so the methods cannot run as written —
    confirm against the upstream diffusers test file.
    """

    # Pipeline under test plus the standard parameter sets the shared mixins
    # check against (repeated attribute name is part of the mangling).
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False

    def _snake_case ( self ) -> List[str]:
        """Build tiny, seeded pipeline components (prior + denoiser + VAE).

        Every component is constructed after ``torch.manual_seed(0)`` so the
        dummy weights are deterministic across runs.
        """
        a__ : Any = 32
        # NOTE(review): ``embedder_hidden_size`` is read but never bound here —
        # presumably the line above was ``embedder_hidden_size = 32``.
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        # ``sample``-prediction DDPM scheduler for the unCLIP prior.
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        # class_embed_type="projection" wires the noised image embedding into
        # the UNet's class embedding path (hence the * 2 input dim).
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        # NOTE(review): the keys below reference names never bound above —
        # the ``a__`` assignments presumably carried these names originally.
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic dummy call kwargs for the pipeline.

        mps has no ``torch.Generator(device=...)`` support, hence the branch.
        """
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self ) -> List[str]:
        """Attention slicing check; exact-diff comparison only on CPU."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )

    def _snake_case ( self ) -> int:
        """Batch-of-one equivalence check; exact diff only on cpu/mps."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """GPU integration tests for ``StableUnCLIPPipeline`` (slow, network-bound).

    NOTE(review): method names are mangled to ``_snake_case`` (all three
    collide, so only the last survives on the class) and several arguments
    read ``snake_case``, which is unbound — confirm against upstream.
    """

    def _snake_case ( self ) -> List[str]:
        """Release GPU memory between tests (presumably ``tearDown`` originally)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> Tuple:
        """Compare a fp16 generation against a stored reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )

    def _snake_case ( self ) -> Tuple:
        """Check peak GPU memory stays under 7 GB with offloading enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune from.

    Field names reconstructed from the attribute reads in ``main``
    (``model_args.model_name_or_path`` etc.); the class name is what
    ``HfArgumentParser`` is instantiated with below.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether tp freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments for the data used to train and evaluate the model.

    Field names reconstructed from the attribute reads in ``main``
    (``data_args.data_dir``, ``data_args.n_train``, ...).
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them as ``{split}_results.json``.

    Args:
        split: one of "train", "val" or "test".
        metrics: mapping of metric name -> value.
        output_dir: directory the JSON results file is written to.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune / evaluate a seq2seq model; returns the collected metrics dict.

    Local names are reconstructed from where the (mangled) assignments were
    later read, e.g. ``model_args``, ``config``, ``trainer``.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    # Forward selected dropout/layerdrop overrides from the training args to the config.
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs): the launcher looks up this exact name and passes
    # the process index, which main() does not need.
    main()
if __name__ == "__main__":
    # Standard CLI entry point.
    # NOTE(review): ``main`` is not defined under that name above (definitions
    # are mangled to ``_A``) — confirm the intended entry point.
    main()
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> list of public names.  The
# original bound this to a throwaway name while passing ``_import_structure``
# (undefined) to ``_LazyModule`` below, which raised NameError on import.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced with a lazy proxy so heavy dependencies load on first access.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both module-level names were mangled to the same identifier, so the second
# assignment clobbered the logger.  Restore distinct names; the archive-map
# constant name follows the HF convention for this model type.
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    """Configuration for the Donut Swin vision encoder (``model_type="donut-swin"``).

    Parameter names reconstructed from the (mangled) ``self.`` assignments in
    ``__init__``; the original signature had every parameter named
    ``snake_case``, which is a SyntaxError.  Base class restored to the
    ``PretrainedConfig`` imported at the top of the file.
    """

    model_type = "donut-swin"

    # Map HF-standard attribute names onto Swin's native names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """Estimate pi by Monte Carlo and print the estimate and its error.

    Samples points uniformly in the [-1, 1] x [-1, 1] square; the fraction
    landing inside the unit circle approaches pi/4.
    """
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of ``function_to_integrate``
    over ``[min_value, max_value]``: mean sampled value times interval width.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Sanity-check the estimator on y = x against the closed-form area
    (max^2 - min^2) / 2, printing the estimate, expectation and error.
    """
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under f(x) = sqrt(4 - x^2) on [0, 2]
    (a quarter circle of radius 2, whose area is exactly pi).
    """
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 629 | 1 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node holding ``data`` coins.

    The class name is restored from its own self-referential annotations and
    from the ``node.data`` / ``node.left`` / ``node.right`` reads below; the
    mangled version collapsed all three fields into one name.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
SCREAMING_SNAKE_CASE__ : Tuple = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so every node holds exactly one coin.

    A move transfers one coin along an edge.  Requires the total number of
    coins to equal the number of nodes; raises ValueError otherwise.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            # An absent child needs no moves and "holds" its one coin already.
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins that must flow across each child edge (sign gives direction).
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest
    doctest.testmod()
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    """Translate one original EfficientFormer state-dict key to the HF name.

    Args:
        old_name: key from the original checkpoint.
        num_meta4D_last_stage: number of meta-4D blocks in the last stage;
            block indices at or beyond it map to the meta-3D sub-stage.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # Keys like "network.3.12.xxx" carry a stage index and a (possibly
        # two-digit) block index; match the widest form first.
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                # NOTE(review): whether this rename chain applies to both
                # last-stage branches or only meta3D follows the upstream
                # conversion script — confirm against it.
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def _A ( lowerCamelCase , lowerCamelCase ):
    # Rename every key of an EfficientFormer state dict in place and return it.
    # NOTE(review): the two parameters share one name — this `def` is a
    # SyntaxError as written; upstream this is
    # convert_torch_checkpoint(checkpoint, num_meta4D_last_stage).
    # NOTE(review): `checkpoint` and `val` are undefined below (the parameter
    # is `lowerCamelCase`), and the popped value is bound to a throwaway local
    # instead of being re-inserted under the renamed key — as written the
    # state dict would simply be emptied. The renamed re-insertion
    # (checkpoint[rename(key, ...)] = val) appears to have been lost.
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
        a__ : Dict = val
    return checkpoint
def _A ( ):
    """Download and return the standard COCO validation test image.

    Fixes the original body, which passed the undefined name ``lowerCamelCase``
    both as the URL and as the ``stream`` flag (a NameError at call time).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the open HTTP response body.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def _A ( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    """Convert an original EfficientFormer checkpoint to the HF format, verify
    its preprocessing (and, for l1/l3, its logits), save it, and optionally
    push model + image processor to the Hub.

    NOTE(review): the original signature repeated ``lowerCamelCase`` four
    times (a SyntaxError); parameter names restored from the ``__main__``
    caller below.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. ".../efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    # NOTE(review): the attribute read here was mangled to `num_metaad_blocks`;
    # the EfficientFormer config names it `num_meta3d_blocks` — confirm.
    num_meta4d_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4d_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline, used to cross-check the image processor
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            # NOTE(review): the Normalize arguments were mangled away; upstream
            # uses the ImageNet default mean/std — confirm the file-top imports.
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127])
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878])
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""")

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(F"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # NOTE(review): the parser was originally bound to SCREAMING_SNAKE_CASE__
    # while every add_argument call used the undefined name `parser`; the final
    # call referenced undefined `convert_efficientformer_checkpoint` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing is enabled by default; --no-push_to_hub opts out.
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    # `_A` is the conversion entry point defined above; arguments are
    # (checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub).
    _A(
        args.pytorch_model_path,
        args.config_file,
        args.pytorch_dump_path,
        args.push_to_hub,
    )
| 629 | 1 |
from typing import Any
def _A ( lowerCamelCase ):
if not input_list:
return []
a__ : Tuple = [input_list.count(lowerCamelCase ) for value in input_list]
a__ : Union[str, Any] = max(lowerCamelCase ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowerCamelCase ) if value == y} )
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# The tokenizer class below reads these under their conventional names; the
# obfuscated code bound every one of them to the single name
# SCREAMING_SNAKE_CASE__ (each assignment shadowing the previous), leaving the
# names the class attributes reference undefined.

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Remote files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (Rust-backed) LXMERT tokenizer: a WordPiece backend with
    BERT-style special-token handling.

    NOTE(review): the original class was unrunnable — every class attribute
    shared the name `_UpperCamelCase`, every method repeated its parameter name
    (a SyntaxError), and the normalizer rebuild assigned to a throwaway local.
    Attribute and method names restored to the conventions the
    PreTrainedTokenizerFast machinery reads — confirm against the rest of the
    repository.
    """

    # Class-level metadata consumed by from_pretrained()/save_pretrained().
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # If the serialized backend normalizer disagrees with the requested
        # options, rebuild it with the requested ones.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """[CLS] A [SEP] (and optionally B [SEP]) token-id layout."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Persist the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 | 1 |
from PIL import Image
def _A ( img , level ):
    """Change the brightness of a PIL image by `level` (clamped to +/-255).

    NOTE(review): the original signature repeated ``lowerCamelCase`` twice
    (a SyntaxError) and the inner function referenced undefined ``c``/``level``;
    names restored from the upstream change_brightness(img, level) helper.
    """
    def brightness(c):
        # Shift each channel value by `level`; kept in the 128-centered form
        # of the original algorithm (algebraically equal to c + level).
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # Apply the per-channel mapping across the whole image.
    return img.point(brightness)
if __name__ == "__main__":
    # NOTE(review): the result was originally bound to SCREAMING_SNAKE_CASE__
    # but saved via the undefined name `brigt_img`, and the call targeted the
    # undefined `change_brightness` (the function above is `_A`).
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        brigt_img = _A(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger (bound to the obfuscated name; nothing in view reads it).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Checkpoint name -> remote config URL for the published MobileNetV2 variants.
# NOTE(review): upstream calls this MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP;
# the obfuscated assignment shadows the logger binding above — confirm.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for MobileNetV2 models.

    NOTE(review): the original ``__init__`` repeated the parameter name
    ``snake_case`` fifteen times (a SyntaxError); parameter names restored
    from the attribute assignments in the body.
    """

    # NOTE(review): the class attribute was mangled to `_UpperCamelCase`;
    # PretrainedConfig machinery reads it as `model_type` — confirm.
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export description for MobileNetV2: the input/output tensor names,
    their dynamic axes, and the numeric tolerance for validating the export.
    """

    _UpperCamelCase : Any = version.parse("""1.11""" )

    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        """Graph inputs: a single image tensor with a dynamic batch axis."""
        return OrderedDict({"pixel_values": {0: "batch"}})

    @property
    def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        """Graph outputs, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict({"logits": {0: "batch"}})
        return OrderedDict(
            {"last_hidden_state": {0: "batch"}, "pooler_output": {0: "batch"}}
        )

    @property
    def _snake_case ( self ) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
| 629 | 1 |
SCREAMING_SNAKE_CASE__ : int = 6_5_5_2_1
def _A ( lowerCamelCase ):
a__ : List[str] = 1
a__ : Optional[int] = 0
for plain_chr in plain_text:
a__ : Union[str, Any] = (a + ord(lowerCamelCase )) % MOD_ADLER
a__ : Union[str, Any] = (b + a) % MOD_ADLER
return (b << 16) | a
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( t , flat_start , flat_end , no_batch_dims ):
    """Equivalent of ``t.reshape(-1, ...)[flat_start:flat_end]`` without
    materializing the flattened tensor: slice the leading `no_batch_dims`
    batch dimensions minimally and concatenate the flattened pieces.

    NOTE(review): the original signature repeated ``lowerCamelCase`` four
    times (a SyntaxError); parameter names restored from the body's usage.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


# Backwards-compatible alias used by chunk_layer below.
_chunk_slice = _A
def _A ( layer , inputs , chunk_size , no_batch_dims , low_mem = False , _out = None , _add_into_out = False , ):
    """Apply `layer` to `inputs` in chunks of size `chunk_size` along the
    flattened leading `no_batch_dims` batch dimensions, writing results into a
    pre-allocated output to bound peak memory.

    NOTE(review): the original signature repeated ``lowerCamelCase`` seven
    times (a SyntaxError), and the dict/tuple writeback branches bound chunks
    to throwaway locals instead of slice-assigning them; restored from the
    names the body already references.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    # Broadcasted batch shape shared by all inputs.
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        # Broadcast each input to the common batch shape; in the default
        # (non-low-mem) path also flatten the batch dims into one.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Ceiling division: number of chunks needed to cover the flat batch.
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        # Size-1 leading dims are broadcast, not chunked.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    # Restore the original (unflattened) batch shape.
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : Any = {
    """configuration_chinese_clip""": [
        """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """ChineseCLIPConfig""",
        """ChineseCLIPOnnxConfig""",
        """ChineseCLIPTextConfig""",
        """ChineseCLIPVisionConfig""",
    ],
    """processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
# `_LazyModule` at the bottom consumes this mapping under the name
# `_import_structure`; the obfuscated code only bound SCREAMING_SNAKE_CASE__
# (and dropped the conditional key additions below), so that reference raised
# NameError.
_import_structure = SCREAMING_SNAKE_CASE__

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["""ChineseCLIPFeatureExtractor"""]
    _import_structure["image_processing_chinese_clip"] = ["""ChineseCLIPImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ChineseCLIPModel""",
        """ChineseCLIPPreTrainedModel""",
        """ChineseCLIPTextModel""",
        """ChineseCLIPVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Install the lazy module in sys.modules; the obfuscated code bound it to
    # a throwaway name, so lazy loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for UperNet semantic-segmentation models.

    NOTE(review): the original ``__init__`` repeated the parameter name
    ``snake_case`` eleven times (a SyntaxError) and ``to_dict`` bound its keys
    to throwaway locals; names restored from the attribute assignments in the
    body.
    """

    # NOTE(review): mangled to `_UpperCamelCase` in the original; the
    # PretrainedConfig machinery reads this as `model_type` — confirm.
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],  # NOTE(review): mutable default kept for upstream parity
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rebuild a concrete config object from a plain-dict backbone spec.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize this config (with a nested backbone dict) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 1 |
# Shell/pip bootstrap cell injected at the top of generated doc notebooks
# (Italian locale).
SCREAMING_SNAKE_CASE__ : str = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
# The notebook cell list below reads this under the name INSTALL_CONTENT; the
# obfuscated assignment above only bound SCREAMING_SNAKE_CASE__.
INSTALL_CONTENT = SCREAMING_SNAKE_CASE__

# First cell of every generated notebook.
# NOTE(review): upstream names these `notebook_first_cells` and
# `black_avoid_patterns`; nothing in view reads them — confirm.
SCREAMING_SNAKE_CASE__ : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map resampling-mode names to PIL constants. The Resampling enum was
# introduced in Pillow 9.1.0; fall back to the legacy module-level constants
# on older versions.
# NOTE(review): nothing in view reads this table; upstream names it
# PIL_INTERPOLATION — confirm against the rest of the file.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def _A ( lowerCamelCase ):
    """Convert a batch of model-output tensors in [-1, 1] to PIL images.

    Fixes the original body, which read the undefined name ``images`` instead
    of the parameter and passed the *unconverted* input tensor (rather than
    the numpy array) on to ``numpy_to_pil``.
    """
    # Rescale from [-1, 1] to [0, 1] and clamp numerical overshoot.
    images = (lowerCamelCase / 2 + 0.5).clamp(0, 1)
    # channels-first torch layout -> channels-last numpy layout
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def _A ( lowerCamelCase ):
    """Convert a numpy image batch (N, H, W, C) with values in [0, 1] into a
    list of PIL images; a single (H, W, C) image is promoted to a batch of 1.

    Fixes the original body, which read the undefined name ``images`` instead
    of the parameter.
    """
    images = lowerCamelCase
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images


# Name used by the tensor-to-PIL helper above.
numpy_to_pil = _A
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
SCREAMING_SNAKE_CASE__ : List[str] = {
    """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
# `_LazyModule` at the bottom consumes this mapping under the name
# `_import_structure`; the obfuscated code only bound SCREAMING_SNAKE_CASE__
# (and dropped the torch-conditional key addition), so that reference raised
# NameError.
_import_structure = SCREAMING_SNAKE_CASE__

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ErnieForCausalLM""",
        """ErnieForMaskedLM""",
        """ErnieForMultipleChoice""",
        """ErnieForNextSentencePrediction""",
        """ErnieForPreTraining""",
        """ErnieForQuestionAnswering""",
        """ErnieForSequenceClassification""",
        """ErnieForTokenClassification""",
        """ErnieModel""",
        """ErniePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Install the lazy module in sys.modules; the obfuscated code bound it to
    # a throwaway name, so lazy loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
# Lint as: python3
import itertools
import os
import re
# Regexes used to convert between CamelCase and snake_case dataset names.
# The function bodies below reference these exact private names; the
# obfuscated code bound every pattern to SCREAMING_SNAKE_CASE__ instead,
# leaving all of them undefined.
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
# Valid split names: dot-separated runs of word characters.
_split_re = r"^\w+(\.\w+)*$"
# NOTE(review): nothing in view reads this constant; upstream names it
# INVALID_WINDOWS_CHARACTERS_IN_PATH — confirm against the rest of the file.
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def _A ( lowerCamelCase ):
    """Convert a CamelCase name to snake_case.

    Fixes the original body, which applied the second substitution to the raw
    input (discarding the first pass) and returned the undefined name
    ``name``.
    """
    name = _uppercase_uppercase_re.sub(r"\1_\2", lowerCamelCase)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


# Name used by filename_prefix_for_name below.
camelcase_to_snakecase = _A
def _A ( lowerCamelCase ):
    """Convert a snake_case name to CamelCase (double underscores are kept as
    separators by the split/capitalize round trip).

    Fixes the original body, which split the whole result list instead of each
    element and read the undefined name ``name``.
    """
    parts = _single_underscore_re.split(lowerCamelCase)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def _A ( lowerCamelCase ):
    """Return the snake_case filename prefix for a dataset name.

    Raises ValueError for path-like inputs. Fixes the original body, which
    compared against the undefined name ``name``.
    """
    if os.path.basename(lowerCamelCase) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""")
    return camelcase_to_snakecase(lowerCamelCase)


# Name used by filename_prefix_for_split below.
filename_prefix_for_name = _A
def _A ( name , split ):
    """Return the filename prefix ``<snake_name>-<split>`` after validating
    both the dataset name and the split name.

    NOTE(review): the original signature repeated ``lowerCamelCase`` twice
    (a SyntaxError); parameter names restored from the body's usage.
    """
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""")
    return F"""{filename_prefix_for_name(name)}-{split}"""


# Name used by the filepath helpers below.
filename_prefix_for_split = _A
def _A ( dataset_name , split , data_dir , filetype_suffix=None ):
    """Return a glob pattern matching all shard files of one dataset split.

    NOTE(review): the original signature repeated ``lowerCamelCase`` four
    times (a SyntaxError); parameter names restored from the upstream
    filepattern_for_dataset_split helper — confirm argument order at call
    sites.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return F"""{filepath}*"""
def _A ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the concrete filename(s) for a dataset split.

    With ``shard_lengths``, one zero-padded ``-NNNNN-of-NNNNN`` name per
    shard; otherwise a single-element list.

    NOTE(review): the original signature repeated ``lowerCamelCase`` five
    times (a SyntaxError); parameter names restored from the upstream
    filenames_for_dataset_split helper — confirm argument order at call sites.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for `AudioLDMPipeline`.

    NOTE(review): identifiers in this block look machine-mangled — class
    attributes all share the name ``_UpperCamelCase``, several method
    signatures repeat the parameter name ``snake_case`` (a SyntaxError), and
    assignments target ``a__`` while later lines read the original variable
    names (``unet``, ``audioldm_pipe``, ``audio``, ...). The original names
    must be restored before this file can run; comments below describe the
    intended behavior, to be confirmed against the upstream diffusers tests.
    """

    # NOTE(review): upstream these four attributes are pipeline_class,
    # params, batch_params and the frozenset of required optional params.
    _UpperCamelCase : List[str] = AudioLDMPipeline
    _UpperCamelCase : List[str] = TEXT_TO_AUDIO_PARAMS
    _UpperCamelCase : Any = TEXT_TO_AUDIO_BATCH_PARAMS
    _UpperCamelCase : Optional[Any] = frozenset(
        [
            """num_inference_steps""",
            """num_waveforms_per_prompt""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    def _snake_case ( self ) -> Any:
        """Build a tiny UNet/DDIM/VAE/CLAP/HiFi-GAN component set for fast tests."""
        torch.manual_seed(0 )
        a__ : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=snake_case , )
        a__ : List[Any] = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = ClapTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
        a__ : int = ClapTextModelWithProjection(snake_case )
        a__ : Optional[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
        a__ : List[Any] = SpeechTaHifiGanConfig(
            model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case , )
        a__ : str = SpeechTaHifiGan(snake_case )
        # NOTE(review): the dict values below (unet, scheduler, ...) were the
        # names the assignments above originally bound — mangled to a__ here.
        a__ : Dict = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> List[str]:
        """Return deterministic pipeline kwargs (device arg, then an int seed)."""
        if str(snake_case ).startswith("mps" ):
            # mps generators must live on CPU
            a__ : int = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : int = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def _snake_case ( self ) -> Optional[Any]:
        """Smoke test: 2 DDIM steps on CPU, checks length and a 10-sample slice."""
        a__ : Any = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a__ : Union[str, Any] = self.get_dummy_components()
        a__ : Tuple = AudioLDMPipeline(**snake_case )
        a__ : Optional[int] = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : List[Any] = self.get_dummy_inputs(snake_case )
        a__ : List[Any] = audioldm_pipe(**snake_case )
        a__ : Optional[Any] = output.audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) == 256
        a__ : List[Any] = audio[:10]
        a__ : int = np.array(
            [-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def _snake_case ( self ) -> int:
        """Passing precomputed prompt embeddings must match passing the prompt string."""
        a__ : Union[str, Any] = self.get_dummy_components()
        a__ : List[Any] = AudioLDMPipeline(**snake_case )
        a__ : Optional[Any] = audioldm_pipe.to(snake_case )
        a__ : Union[str, Any] = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Tuple = self.get_dummy_inputs(snake_case )
        a__ : List[str] = 3 * [inputs["prompt"]]
        # forward
        a__ : str = audioldm_pipe(**snake_case )
        a__ : Any = output.audios[0]
        a__ : List[str] = self.get_dummy_inputs(snake_case )
        a__ : List[Any] = 3 * [inputs.pop("prompt" )]
        a__ : str = audioldm_pipe.tokenizer(
            snake_case , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case , return_tensors="pt" , )
        a__ : int = text_inputs["input_ids"].to(snake_case )
        a__ : str = audioldm_pipe.text_encoder(
            snake_case , )
        a__ : Optional[int] = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        a__ : int = F.normalize(snake_case , dim=-1 )
        a__ : List[str] = prompt_embeds
        # forward
        a__ : str = audioldm_pipe(**snake_case )
        a__ : Any = output.audios[0]
        # NOTE(review): both forward outputs collapsed to `audio_a` by the
        # mangling — as written this compares a value with itself (vacuous).
        assert np.abs(audio_a - audio_a ).max() < 1E-2
    def _snake_case ( self ) -> Union[str, Any]:
        """Precomputed negative-prompt embeddings must match the string form."""
        a__ : Tuple = self.get_dummy_components()
        a__ : int = AudioLDMPipeline(**snake_case )
        a__ : Optional[int] = audioldm_pipe.to(snake_case )
        a__ : Union[str, Any] = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Optional[Any] = self.get_dummy_inputs(snake_case )
        a__ : Dict = 3 * ["this is a negative prompt"]
        a__ : Union[str, Any] = negative_prompt
        a__ : Any = 3 * [inputs["prompt"]]
        # forward
        a__ : Tuple = audioldm_pipe(**snake_case )
        a__ : str = output.audios[0]
        a__ : Dict = self.get_dummy_inputs(snake_case )
        a__ : List[str] = 3 * [inputs.pop("prompt" )]
        a__ : List[Any] = []
        for p in [prompt, negative_prompt]:
            a__ : Optional[Any] = audioldm_pipe.tokenizer(
                snake_case , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case , return_tensors="pt" , )
            a__ : Any = text_inputs["input_ids"].to(snake_case )
            a__ : Any = audioldm_pipe.text_encoder(
                snake_case , )
            a__ : str = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            a__ : Tuple = F.normalize(snake_case , dim=-1 )
            embeds.append(snake_case )
        a__ , a__ : Any = embeds
        # forward
        a__ : Dict = audioldm_pipe(**snake_case )
        a__ : Optional[int] = output.audios[0]
        # NOTE(review): vacuous self-comparison — see note in the test above.
        assert np.abs(audio_a - audio_a ).max() < 1E-2
    def _snake_case ( self ) -> Any:
        """Negative prompt as a string with the PNDM scheduler; slice check."""
        a__ : Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a__ : Union[str, Any] = self.get_dummy_components()
        a__ : int = PNDMScheduler(skip_prk_steps=snake_case )
        a__ : str = AudioLDMPipeline(**snake_case )
        a__ : Union[str, Any] = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Any = self.get_dummy_inputs(snake_case )
        a__ : Optional[Any] = "egg cracking"
        a__ : List[Any] = audioldm_pipe(**snake_case , negative_prompt=snake_case )
        a__ : Tuple = output.audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) == 256
        a__ : List[Any] = audio[:10]
        a__ : Dict = np.array(
            [-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
        assert np.abs(audio_slice - expected_slice ).max() < 1E-2
    def _snake_case ( self ) -> List[str]:
        """Output batch shape for every prompt/num_waveforms_per_prompt combination."""
        a__ : Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a__ : Optional[Any] = self.get_dummy_components()
        a__ : List[str] = PNDMScheduler(skip_prk_steps=snake_case )
        a__ : Dict = AudioLDMPipeline(**snake_case )
        a__ : int = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Any = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        a__ : Union[str, Any] = audioldm_pipe(snake_case , num_inference_steps=2 ).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        a__ : Dict = 2
        a__ : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        a__ : Dict = 2
        a__ : Union[str, Any] = audioldm_pipe(snake_case , num_inference_steps=2 , num_waveforms_per_prompt=snake_case ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        a__ : Dict = 2
        a__ : Optional[int] = audioldm_pipe(
            [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def _snake_case ( self ) -> Union[str, Any]:
        """`audio_length_in_s` must control output duration (len / sampling rate)."""
        a__ : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a__ : int = self.get_dummy_components()
        a__ : List[str] = AudioLDMPipeline(**snake_case )
        a__ : Union[str, Any] = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Dict = audioldm_pipe.vocoder.config.sampling_rate
        a__ : Union[str, Any] = self.get_dummy_inputs(snake_case )
        a__ : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **snake_case )
        a__ : Dict = output.audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) / vocoder_sampling_rate == 0.016
        a__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.032 , **snake_case )
        a__ : str = output.audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) / vocoder_sampling_rate == 0.032
    def _snake_case ( self ) -> int:
        """Doubling the vocoder's mel channels must not change the waveform shape."""
        a__ : Dict = self.get_dummy_components()
        a__ : Any = AudioLDMPipeline(**snake_case )
        a__ : str = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Any = ["hey"]
        a__ : int = audioldm_pipe(snake_case , num_inference_steps=1 )
        a__ : List[str] = output.audios.shape
        assert audio_shape == (1, 256)
        a__ : Dict = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        a__ : List[str] = SpeechTaHifiGan(snake_case ).to(snake_case )
        a__ : Tuple = audioldm_pipe(snake_case , num_inference_steps=1 )
        a__ : Dict = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def _snake_case ( self ) -> Optional[int]:
        """Delegates to the mixin's attention-slicing equivalence check."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case )
    def _snake_case ( self ) -> str:
        """Delegates to the mixin's single-vs-batched inference equivalence check."""
        self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _snake_case ( self ) -> Optional[int]:
        """Delegates to the mixin's xFormers attention equivalence check (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests for `AudioLDMPipeline` against the real checkpoint.

    NOTE(review): same identifier mangling as the fast-test class above
    (duplicate ``snake_case`` parameters, ``a__`` assignments read back under
    their original names) — restore the original names before running.
    """

    def _snake_case ( self ) -> int:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self , snake_case , snake_case="cpu" , snake_case=torch.floataa , snake_case=0 ) -> Union[str, Any]:
        """Build deterministic pipeline kwargs with fixed NumPy-seeded latents."""
        a__ : Tuple = torch.Generator(device=snake_case ).manual_seed(snake_case )
        # (1, 8, 128, 16) latents, seeded via NumPy for cross-device determinism
        a__ : Union[str, Any] = np.random.RandomState(snake_case ).standard_normal((1, 8, 128, 16) )
        a__ : Tuple = torch.from_numpy(snake_case ).to(device=snake_case , dtype=snake_case )
        a__ : Dict = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def _snake_case ( self ) -> int:
        """25-step run on cvssp/audioldm; checks length and a mid-waveform slice."""
        a__ : Union[str, Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        a__ : Any = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : Any = self.get_inputs(snake_case )
        a__ : str = 25
        a__ : Tuple = audioldm_pipe(**snake_case ).audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) == 81_920
        a__ : Any = audio[77_230:77_240]
        a__ : Union[str, Any] = np.array(
            [-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
        a__ : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1E-2
    def _snake_case ( self ) -> Optional[int]:
        """Same checkpoint with the LMS discrete scheduler swapped in."""
        a__ : Optional[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
        a__ : Union[str, Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        a__ : Dict = audioldm_pipe.to(snake_case )
        audioldm_pipe.set_progress_bar_config(disable=snake_case )
        a__ : str = self.get_inputs(snake_case )
        a__ : str = audioldm_pipe(**snake_case ).audios[0]
        assert audio.ndim == 1
        assert len(snake_case ) == 81_920
        a__ : Optional[Any] = audio[27_780:27_790]
        a__ : List[str] = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
        a__ : Any = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3E-2
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the SEW model (standard transformers pattern).
# The mangled original bound the structure dict and the torch-only symbol list
# to SCREAMING_SNAKE_CASE__ variables, so `_import_structure` at the bottom was
# an undefined name and the lazy module was never installed in sys.modules.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration symbols
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import argparse
import json
import subprocess
def _A ( target_runners , token ):
    """Report self-hosted GitHub Actions runners that are offline.

    The original signature declared two parameters both named ``lowerCamelCase``
    (SyntaxError) and bound every intermediate to ``a__`` while later lines
    read ``offline_runners``/``output``/``status``/``runners`` — restored here.

    Args:
        target_runners: Runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: If any targeted runner reports status "offline". The
            offline list is also written to ``offline_runners.txt`` so it can
            be reported on Slack.
    """
    offline_runners = []
    cmd = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True is required: the command is a single string with quoting.
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(F"""The following runners are offline:\n{failed}""")
if __name__ == "__main__":
    # Script entry point. In the mangled original the converter function was
    # named `_A` (shadowing the runner-status checker defined above), and the
    # parser/args/function names referenced below were undefined — restored.

    def list_str(values):
        """Split a comma-separated CLI value into a list of runner names."""
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()
    # `_A` is the offline-runner checker defined earlier in this file.
    _A(args.target_runners, args.token)
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the CpmAnt model (standard transformers pattern).
# The mangled original never defined `_import_structure` (it bound the dict and
# the torch-only list to SCREAMING_SNAKE_CASE__), so the final _LazyModule call
# raised NameError and the lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only configuration and tokenizer symbols
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths of .py/.ipynb files under `top_dir`, skipping scripts and dunders.

    Renamed from the mangled `_A`: the caller below invokes `good_file_paths`
    by name, and four functions in this file all collided on `_A`. Two defects
    fixed: the pruned directory list must be assigned back into `dir_names[:]`
    (in place) so os.walk actually skips those directories, and splitext must
    be applied to `filename`, not the top-level argument.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # In-place slice assignment prunes the walk (topdown=True default).
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")
def md_prefix(i: int) -> str:
    """Return the markdown bullet prefix for nesting depth `i` (header at depth 0).

    Renamed from the mangled `_A` (callers below use `md_prefix`); the body
    referenced an undefined `i` while ignoring its parameter — now wired up.
    """
    return F"""{i * "  "}*""" if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    """Print markdown headers/bullets for path components new in `new_path`.

    Renamed from the mangled `_A` (the caller below uses `print_path`); the
    body referenced `old_path`/`old_parts` that were never bound — restored.

    Args:
        old_path: Previously printed directory path.
        new_path: Directory path of the current file.

    Returns:
        `new_path`, so the caller can carry it forward as the new `old_path`.
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        # Only print components that differ from (or extend past) the old path.
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(F"""{md_prefix(i)} {new_part.replace("_", " ").title()}""")
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a DIRECTORY.md-style markdown index of source files under `top_dir`.

    Renamed from the mangled `_A` (the __main__ guard calls
    `print_directory_md`). Restores the mangled locals, and the link text that
    had been replaced by the literal "(unknown)" artifact — the link label is
    the title-cased filename, the URL is the full path with spaces escaped.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = F"""{filepath}/{filename}""".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(F"""{md_prefix(indent)} [{filename}]({url})""")
# Script entry point: emit the markdown index for the current directory.
if __name__ == "__main__":
    print_directory_md(""".""")
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the text-question-answering tool in the tests below
# (upstream this constant is named TEXT).
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the `text-question-answering` tool (local and remote variants).

    NOTE(review): identifier mangling broke this class — setUp binds the loaded
    tools to ``a__`` instead of ``self.tool``/``self.remote_tool``, and the
    calls below pass ``snake_case`` where the TEXT fixture (the module constant
    above) belongs. Restore those bindings before running.
    """

    def _snake_case ( self ) -> str:
        """setUp: load the tool locally and as a remote endpoint wrapper."""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
    def _snake_case ( self ) -> Dict:
        """Local tool, positional text argument."""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Tuple:
        """Remote tool, positional text argument."""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Any:
        """Local tool, keyword arguments."""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> int:
        """Remote tool, keyword arguments."""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 1 |
def _A ( lowerCamelCase ):
a__ : int = len(lowerCamelCase )
a__ : Any = len(matrix[0] )
a__ : Dict = min(lowerCamelCase , lowerCamelCase )
for row in range(lowerCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , lowerCamelCase ):
a__ : Union[str, Any] = matrix[col][row] / matrix[row][row]
for i in range(lowerCamelCase , lowerCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
a__ : Optional[Any] = True
for i in range(row + 1 , lowerCamelCase ):
if matrix[i][row] != 0:
a__ , a__ : List[str] = matrix[i], matrix[row]
a__ : Optional[Any] = False
break
if reduce:
rank -= 1
for i in range(lowerCamelCase ):
a__ : int = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Tests that transformers works offline (TRANSFORMERS_OFFLINE=1).

    Each test composes a small Python program from string fragments, runs it
    in a subprocess with a patched ``socket.socket`` to simulate no network,
    and asserts on the subprocess result.

    NOTE(review): identifier mangling broke this class — the fragments are
    bound to ``a__`` but joined as ``load``/``run``/``mock``, and later
    ``cmd``/``env``/``result``/``mname`` are read without being bound.
    Restore the original names before running.
    """

    @require_torch
    def _snake_case ( self ) -> str:
        """Cached model loads and pipeline inference succeed with TRANSFORMERS_OFFLINE=1."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Cached loads survive a flaky (socket.error-raising) network."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoints load from cache in offline mode."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Task inference inside `pipeline` must fail with a clear error offline."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """trust_remote_code models load from cache in offline mode."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _A ( role_name ):
    """Create an IAM role that SageMaker can assume, with a training policy.

    The mangled original bound the client and the trust policy to ``a__`` and
    then read ``iam_client`` — restored. Idempotent: if the role already
    exists, a message is printed and the existing role is reused.

    Args:
        role_name: Name of the IAM role to create.
    """
    iam_client = botoa.client("iam")
    # Trust policy allowing the SageMaker service to assume this role.
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        # Permissions the training job needs: SageMaker, ECR pulls,
        # CloudWatch metrics, logs, and S3 read/write.
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=F"""{role_name}_policy_permission""", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(F"""role {role_name} already exists. Using existing one""")
def _A ( role_name ):
    """Return the ARN of the IAM role named `role_name`.

    The mangled original bound the client to ``a__`` and then read
    ``iam_client`` — restored.
    """
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def _A ( ):
    """Interactively collect Amazon SageMaker launch settings and build a SageMakerConfig.

    Walks the user through: AWS credentials (profile or key pair), region,
    IAM role (existing or newly created), optional custom Docker image,
    optional SageMaker input-channel and metrics TSV files, distributed mode,
    optional torch dynamo settings, EC2 instance type, machine count, and
    mixed precision.

    NOTE(review): identifier mangling broke this function — every prompt
    result is bound to ``a__`` while later lines read the original names
    (``credentials_configuration``, ``aws_profile``, ``distributed_type``,
    ``use_dynamo``, ``prefix``, ``eca_instance_query``, ...), and the final
    SageMakerConfig call passes ``lowerCamelCase`` for every keyword. The
    original bindings must be restored before this can run.
    """
    # 0 = use an AWS profile; 1 = enter an access-key pair directly.
    a__ : Any = _ask_options(
        "How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowerCamelCase , )
    a__ : List[str] = None
    if credentials_configuration == 0:
        a__ : Tuple = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
        a__ : Optional[int] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
        a__ : Optional[Any] = _ask_field("AWS Access Key ID: " )
        a__ : Tuple = aws_access_key_id
        a__ : List[str] = _ask_field("AWS Secret Access Key: " )
        a__ : Optional[Any] = aws_secret_access_key
    a__ : List[str] = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
    a__ : Union[str, Any] = aws_region
    # 0 = user supplies an existing role; 1 = create one from credentials.
    a__ : Tuple = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowerCamelCase , )
    if role_management == 0:
        a__ : Any = _ask_field("Enter your IAM role name: " )
    else:
        a__ : Optional[Any] = "accelerate_sagemaker_execution_role"
        print(F"""Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials""" )
        _create_iam_role_for_sagemaker(lowerCamelCase )
    a__ : List[Any] = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
    a__ : Optional[int] = None
    if is_custom_docker_image:
        a__ : Optional[int] = _ask_field("Enter your Docker image: " , lambda lowerCamelCase : str(lowerCamelCase ).lower() )
    a__ : Optional[Any] = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
    a__ : List[str] = None
    if is_sagemaker_inputs_enabled:
        a__ : Any = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowerCamelCase : str(lowerCamelCase ).lower() , )
    a__ : Tuple = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
    a__ : Optional[int] = None
    if is_sagemaker_metrics_enabled:
        a__ : Optional[Any] = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowerCamelCase : str(lowerCamelCase ).lower() , )
    a__ : int = _ask_options(
        "What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
    a__ : Any = {}
    a__ : Any = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
    if use_dynamo:
        # dynamo settings are stored under "dynamo_"-prefixed config keys
        a__ : List[Any] = "dynamo_"
        a__ : Optional[int] = _ask_options(
            "Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
        a__ : List[Any] = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
        if use_custom_options:
            a__ : Tuple = _ask_options(
                "Which mode do you want to use?" , lowerCamelCase , lambda lowerCamelCase : TORCH_DYNAMO_MODES[int(lowerCamelCase )] , default="default" , )
            a__ : Optional[Any] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
            a__ : Union[str, Any] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowerCamelCase , error_message="Please enter yes or no." , )
    a__ : Any = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        # distributed training: pick from the supported parallel instance list
        a__ : Dict = _ask_options(
            lowerCamelCase , lowerCamelCase , lambda lowerCamelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowerCamelCase )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        a__ : Union[str, Any] = _ask_field(lowerCamelCase , lambda lowerCamelCase : str(lowerCamelCase ).lower() , default="ml.p3.2xlarge" )
    a__ : Any = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        a__ : Optional[Any] = _ask_field(
            "How many machines do you want use? [1]: " , lowerCamelCase , default=1 , )
    a__ : List[str] = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
    return SageMakerConfig(
        image_uri=lowerCamelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowerCamelCase , use_cpu=lowerCamelCase , dynamo_config=lowerCamelCase , eca_instance_type=lowerCamelCase , profile=lowerCamelCase , region=lowerCamelCase , iam_role_name=lowerCamelCase , mixed_precision=lowerCamelCase , num_machines=lowerCamelCase , sagemaker_inputs_file=lowerCamelCase , sagemaker_metrics_file=lowerCamelCase , )
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazily-importable public API of this sub-package; the configuration is
# always importable, modeling objects only when torch is installed.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown(self) -> None:
        # Free per-test memory so consecutive tests don't exhaust device RAM.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self) -> None:
        """Run the bf16 SD2 pipeline end-to-end and compare an image slice."""
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        # Reference values recorded from a known-good run.
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self) -> None:
        """Same pipeline but with the DPM-Solver multistep scheduler."""
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # The pipeline params must carry the scheduler state as well.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 629 |
from PIL import Image
def change_brightness(img, level):
    """Return a copy of *img* with every channel value shifted by *level*.

    Args:
        img: a PIL ``Image`` (or any object exposing ``point``).
        level: brightness offset in [-255.0, 255.0].

    Raises:
        ValueError: if ``level`` is outside the valid range.
    """

    def brightness(c) -> float:
        # 128 is the mid-gray pivot; the expression reduces to c + level.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # Image.point applies the callable to every pixel channel value.
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save("image_data/lena_brightness.png", format="png")
| 629 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
# Module-level logger.
logger = logging.get_logger(__name__)

# Canonical config locations for the publicly released checkpoints.
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for LayoutLMv3.

    Combines standard transformer-encoder hyper-parameters (handled by the
    base config) with the LayoutLMv3-specific 2D layout, relative-attention
    and visual-patch settings stored on this class.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # Text-encoder hyper-parameters are consumed by the base class.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # 2D (layout) position embedding settings.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        # Relative attention bias settings.
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        # Modality switches and visual-patch settings.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for LayoutLMv3 models."""

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Model input names mapped to their dynamic axes, per task."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            # Other tasks use a different input ordering and keep the image
            # spatial axes static.
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating exported model outputs."""
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        """Default ONNX opset version used for the export."""
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build a dummy (text, bbox, image) batch for tracing the model.

        Returns the processor output as a plain dict of tensors.
        """
        # OCR must be disabled: we feed the words and boxes explicitly.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # Generate dummy images of the requested size.
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping entries from ``state_dict`` in place.

    The original OpenAI checkpoints carry "layers"/"blocks" counters that
    have no counterpart in the transformers model.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        # Missing keys are fine; pop with a default never raises.
        state_dict.pop(k, None)
# Substring replacements mapping OpenAI Whisper state-dict key fragments to
# their transformers equivalents.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename every key of ``s_dict`` in place using WHISPER_MAPPING.

    All mapping fragments found in the original key are replaced
    cumulatively. Returns the same dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares *emb*'s weight tensor.

    Used to tie the LM head to the decoder token embeddings. Assigning
    ``.weight.data`` shares storage with the embedding (no copy).
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url, root=os.path.join(os.path.expanduser("~"), ".cache", "whisper")):
    """Download *url* into *root* and return the file contents as bytes.

    The expected SHA-256 is taken from the second-to-last URL path segment
    (the layout used by the OpenAI checkpoint URLs). An existing file with a
    matching checksum is reused; a mismatch triggers a re-download.

    Raises:
        RuntimeError: if the target path is not a regular file, or the
            downloaded data fails the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to a transformers model directory.

    Args:
        checkpoint_path: either a model-size key of ``_MODELS`` (downloaded
            on demand) or a path to a local ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the original token embedding so the LM head can be restored if
    # weights are not tied.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Encoder and decoder share the same FFN width in OpenAI checkpoints.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # was wired to "n_text_state" (the hidden size) — "n_text_head" is
        # the decoder head count in the OpenAI dims structure
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """Tokenization tests for CodeGen (slow and fast tokenizers)."""

    # Attributes read by the TokenizerTesterMixin.
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer built from the tiny vocab written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer built from the tiny vocab written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Intentionally skipped: byte-level BPE makes pretokenized inputs
        # ambiguous w.r.t. the leading-space handling.
        pass

    def test_padding(self, max_length=15):
        """Padding without a pad token must raise on the fast tokenizer."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        # CodeGen's tokenizer has no padding token by default.
        pass
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for Informer time-series forecasting models.

    Stores the time-series-specific settings (prediction/context lengths,
    feature counts, cardinalities) alongside the encoder-decoder transformer
    hyper-parameters and the Informer-specific ProbSparse attention options.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ) -> None:
        # Time-series-specific configuration.
        self.prediction_length = prediction_length
        # Fall back to the prediction length when no context length is given.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the (cardinality + 1), capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific attention settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features concatenated to the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


# Lazily-importable public API; each optional backend contributes its
# symbols only when the corresponding dependency is installed.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so heavy imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny random checkpoints used as cheap teacher models in the tests below.
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for create_student_by_copying_alternating_layers."""

    @cached_property
    def teacher_config(self):
        # Cached so repeated accesses don't re-fetch the config.
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Must not raise when the decoder depth is left unspecified.
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        # Unspecified decoder depth should copy the teacher's depth.
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # At least one of e/d must be given.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 629 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
# Module-level logger for this pipeline.
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)


@add_end_docstrings(_UpperCamelCase )
class __lowerCAmelCase ( _UpperCamelCase ):
    """Zero-shot object-detection chunk pipeline: scores free-text candidate labels
    against boxes predicted for an image (OWL-ViT-style models), PyTorch only.

    NOTE(review): an automated rename has corrupted many names in this class --
    parameters are all called ``snake_case`` (duplicate parameter names are invalid
    Python), and several bodies read names that are never bound (``kwargs``,
    ``image``, ``target_size``, ``results`` ...). The docstrings below describe the
    apparent intent; confirm against the original pipeline source.
    """

    def __init__( self , **snake_case ) -> Dict:
        """Validate the backend: PyTorch-only, and the vision extra must be installed."""
        super().__init__(**snake_case )
        if self.framework == "tf":
            raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , "vision" )
        self.check_model_type(snake_case )  # NOTE(review): `snake_case` is not bound here

    def __call__( self , snake_case , snake_case = None , **snake_case , ) -> List[Any]:
        """Run detection for an image (or batch of inputs) against candidate labels."""
        # NOTE(review): the body reads `kwargs`, `image` and `candidate_labels`,
        # which the mangled signature no longer provides.
        if "text_queries" in kwargs:
            a__ : List[Any] = kwargs.pop("text_queries" )
        if isinstance(snake_case , (str, Image.Image) ):
            # Single image + labels: wrap into the dict shape the chunk pipeline expects.
            a__ : Any = {"image": image, "candidate_labels": candidate_labels}
        else:
            a__ : Tuple = image
        a__ : Tuple = super().__call__(snake_case , **snake_case )
        return results  # NOTE(review): `results` is never bound (unpack target was mangled)

    def _snake_case ( self , **snake_case ) -> Optional[Any]:
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts."""
        a__ : Union[str, Any] = {}
        if "threshold" in kwargs:
            a__ : int = kwargs["threshold"]
        if "top_k" in kwargs:
            a__ : Tuple = kwargs["top_k"]
        return {}, {}, postprocess_params  # NOTE(review): `postprocess_params` undefined

    def _snake_case ( self , snake_case ) -> Tuple:
        """Yield one model-input dict per candidate label for the given image."""
        a__ : Any = load_image(inputs["image"] )
        a__ : int = inputs["candidate_labels"]
        if isinstance(snake_case , snake_case ):
            # Labels may arrive as a single comma-separated string.
            a__ : List[str] = candidate_labels.split("," )
        # Original image size, used later to rescale predicted boxes.
        a__ : str = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
        for i, candidate_label in enumerate(snake_case ):
            a__ : Union[str, Any] = self.tokenizer(snake_case , return_tensors=self.framework )
            a__ : int = self.image_processor(snake_case , return_tensors=self.framework )
            yield {
                "is_last": i == len(snake_case ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _snake_case ( self , snake_case ) -> Union[str, Any]:
        """Forward one (image, label) pair and carry the bookkeeping fields through."""
        a__ : str = model_inputs.pop("target_size" )
        a__ : int = model_inputs.pop("candidate_label" )
        a__ : Dict = model_inputs.pop("is_last" )
        a__ : Tuple = self.model(**snake_case )
        a__ : str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def _snake_case ( self , snake_case , snake_case=0.1 , snake_case=None ) -> Any:
        """Convert raw outputs into a score-sorted list of {score, label, box} dicts."""
        a__ : Tuple = []
        for model_output in model_outputs:
            a__ : int = model_output["candidate_label"]
            a__ : str = BaseModelOutput(snake_case )
            a__ : int = self.image_processor.post_process_object_detection(
                outputs=snake_case , threshold=snake_case , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                a__ : Tuple = outputs["scores"][index].item()
                a__ : str = self._get_bounding_box(outputs["boxes"][index][0] )
                a__ : List[Any] = {"score": score, "label": label, "box": box}
                results.append(snake_case )
        # Highest-confidence detections first; optionally truncate to top_k.
        a__ : Union[str, Any] = sorted(snake_case , key=lambda snake_case : x["score"] , reverse=snake_case )
        if top_k:
            a__ : str = results[:top_k]
        return results

    def _snake_case ( self , snake_case ) -> Dict[str, int]:
        """Turn an ``[xmin, ymin, xmax, ymax]`` tensor into a plain dict of ints (PyTorch only)."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        a__ , a__ , a__ , a__ : Any = box.int().tolist()
        a__ : List[str] = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# The four Stable Diffusion v1.x checkpoint ids this comparison pipeline runs side by side.
# NOTE(review): all four constants share one name after an automated rename, so only
# the v1-4 id survives; the originals were four distinct names.
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""


class __lowerCAmelCase ( _UpperCamelCase ):
    """DiffusionPipeline that loads all four Stable Diffusion v1.x checkpoints and can
    run the same prompt through each of them, returning the four images together.

    NOTE(review): heavily corrupted by an automated rename -- ``super()._init_()``
    should be ``__init__``, all four sub-pipelines are stored under the single name
    ``pipea`` (and never assigned to ``self``), ``register_modules`` repeats one
    keyword, and the per-checkpoint methods call ``self.textaimg_sda_a``, which is
    never defined. Confirm against the original community pipeline.
    """

    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Load the three sibling v1.x checkpoints and assemble the fourth from components."""
        super()._init_()  # NOTE(review): should be super().__init__()
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )

    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Registered components: every config key that does not start with '_'."""
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}

    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; 'auto' uses half the attention head count per slice."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )

    def _snake_case ( self ) -> Tuple:
        """Disable attention slicing (passes None through to the enable method)."""
        self.enable_attention_slicing(snake_case )

    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Run the prompt through the v1.1 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )

    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through the v1.2 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )

    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Run the prompt through the v1.3 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )

    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through the v1.4 sub-pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )

    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through all four checkpoints and return the four images."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the DistilBERT subpackage: maps submodule name -> the
# public names it exports. Optional backends (tokenizers / torch / tf / flax) are
# appended below only when available, so importing this package never fails on a
# missing backend.
# Defects fixed: the mangled original assigned this dict (and each optional export
# list) to throwaway names, so `_import_structure` was undefined at the bottom of
# the file and the optional exports were never registered; the _LazyModule was also
# bound to a variable instead of being installed in sys.modules.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module below
    # resolves attributes on first access instead.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( _UpperCamelCase ):
    """End-to-end smoke test: fine-tune a tiny BERT2BERT encoder-decoder on 1% of
    CNN/DailyMail with Seq2SeqTrainer; passes if training raises no exception.

    NOTE(review): corrupted by an automated rename -- most assignments bind a local
    named ``a__`` instead of the intended names, so later references
    (``bertabert``, ``train_dataset``, ``batch``, ``inputs`` ...) are undefined.
    Comments below describe the apparent intent.
    """

    @slow
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Train a tiny bert2bert model for a few steps with generate-based eval."""
        a__ : Optional[Any] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        a__ : List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
        a__ : List[Any] = bertabert.config.encoder.vocab_size
        a__ : Optional[int] = tokenizer.sep_token_id
        a__ : Optional[Any] = tokenizer.cls_token_id
        a__ : Any = 128  # presumably max generation length -- TODO confirm
        a__ : Optional[int] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        a__ : Optional[int] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        # Keep only a handful of examples so the test stays fast.
        a__ : Tuple = train_dataset.select(range(32 ) )
        a__ : Optional[Any] = val_dataset.select(range(16 ) )
        a__ : int = 4  # presumably the per-device batch size -- TODO confirm

        def _map_to_encoder_decoder_inputs(snake_case ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            a__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=snake_case , max_length=512 )
            a__ : int = tokenizer(batch["highlights"] , padding="max_length" , truncation=snake_case , max_length=128 )
            a__ : str = inputs.input_ids
            a__ : Any = inputs.attention_mask
            a__ : Tuple = outputs.input_ids
            a__ : List[str] = outputs.input_ids.copy()
            # Replace label padding with -100 so the loss ignores those positions.
            a__ : Dict = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            a__ : int = outputs.attention_mask
            assert all(len(snake_case ) == 512 for x in inputs.input_ids )
            assert all(len(snake_case ) == 128 for x in outputs.input_ids )
            return batch

        def _compute_metrics(snake_case ):
            a__ : Any = pred.label_ids
            a__ : int = pred.predictions
            # all unnecessary tokens are removed
            a__ : Tuple = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
            a__ : Union[str, Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
            # Exact-match accuracy between decoded predictions and labels.
            a__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(snake_case ) )] ) / len(snake_case )
            return {"accuracy": accuracy}

        # map train dataset
        a__ : Dict = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=snake_case , batch_size=snake_case , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        a__ : Tuple = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=snake_case , batch_size=snake_case , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        a__ : Dict = self.get_auto_remove_tmp_dir()
        a__ : Optional[int] = SeqaSeqTrainingArguments(
            output_dir=snake_case , per_device_train_batch_size=snake_case , per_device_eval_batch_size=snake_case , predict_with_generate=snake_case , evaluation_strategy="steps" , do_train=snake_case , do_eval=snake_case , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        a__ : List[str] = SeqaSeqTrainer(
            model=snake_case , args=snake_case , compute_metrics=_compute_metrics , train_dataset=snake_case , eval_dataset=snake_case , tokenizer=snake_case , )
        # start training
        trainer.train()
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( lowerCamelCase , lowerCamelCase ):
    """Insert a value into the treap: split around the value, then merge the new
    node between the two halves.

    NOTE(review): invalid as written -- the duplicate parameter names are a
    SyntaxError, ``Node``/``split``/``merge`` are undefined after the automated
    rename (the class is ``__lowerCAmelCase`` and every function here is ``_A``),
    and the unpack targets were collapsed to ``a__``. Kept byte-identical pending
    reconstruction of the original module.
    """
    a__ : Any = Node(lowerCamelCase )
    a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
    return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
    """Erase all nodes holding ``value`` by splitting just below and just above it,
    then merging the two outer treaps back together.

    NOTE(review): invalid as written -- duplicate parameter names (SyntaxError),
    undefined ``split``/``merge`` after the automated rename, and mangled unpack
    targets. Kept byte-identical pending reconstruction.
    """
    a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
    a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
    return merge(lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
    """Apply a whitespace-separated command string to the treap: ``+N`` inserts N,
    ``-N`` erases N, anything else prints an error. Returns the updated root.

    NOTE(review): invalid as written -- duplicate parameter names (SyntaxError), the
    body iterates the unbound name ``args``, calls the undefined ``insert``/``erase``
    (every function here is ``_A`` after the rename), and returns the unbound
    ``root``. Kept byte-identical pending reconstruction.
    """
    for arg in args.split():
        if arg[0] == "+":
            a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
        elif arg[0] == "-":
            a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
        else:
            print("Unknown command" )
    return root
def _A ( ):
    """Interactive REPL: read command strings, mutate the treap, and print it until
    the user enters 'q'.

    NOTE(review): the loop condition reads the unbound name ``args`` (the mangled
    assignments above bind ``a__`` instead) and calls ``interact_treap``, which no
    longer exists under that name -- every function in this module is ``_A``.
    """
    a__ : List[str] = None  # current treap root (starts empty)
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    a__ : int = input()
    while args != "q":
        a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
        print(lowerCamelCase )
        a__ : Optional[Any] = input()
    print("good by!" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is undefined here; the REPL above is named `_A`.
    main()
| 629 | 1 |
def _A ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def _A ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def _A ( lowerCamelCase = 1_0000 ):
a__ : Any = []
for num in range(1 , lowerCamelCase ):
a__ : int = 0
a__ : Any = num
while iterations < 50:
a__ : Tuple = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined in this module -- the implementation
    # above is named `_A` after an automated rename, so running this file as a
    # script raises NameError.
    print(f'{solution() = }')
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make runs reproducible across devices/backends for the comparisons below.
enable_full_determinism()


class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast (tiny-model) tests for StableUnCLIPPipeline via the shared tester mixins.

    NOTE(review): corrupted by an automated rename -- the class attributes all share
    the name ``_UpperCamelCase`` (so only the last assignment survives), and several
    bodies read names that were mangled away (``embedder_hidden_size``,
    ``embedder_projection_dim``, the component locals gathered into the dict, the
    inputs dict returned by ``get_dummy_inputs``). Docstrings describe intent.
    """

    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False

    def _snake_case ( self ) -> List[str]:
        """Build the full set of tiny components: prior, image noising, denoiser, VAE."""
        a__ : Any = 32  # presumably the shared embedder hidden size -- TODO confirm
        a__ : int = embedder_hidden_size
        # prior components

        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )

        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )

        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )

        # regular denoising components

        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )

        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )

        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )

        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )

        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )

        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        # NOTE(review): the component locals above were all mangled to `a__`, so the
        # names referenced in this dict are undefined.
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Deterministic dummy call arguments (seeded generator on the right device)."""
        if str(snake_case ).startswith("mps" ):
            # MPS generators must be created via torch.manual_seed.
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self ) -> List[str]:
        """Attention slicing must not change results (exact check only on CPU)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )

    def _snake_case ( self ) -> int:
        """Single-sample and batched inference must agree (exact check on CPU/MPS)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the pretrained stable-unclip-2-1-l checkpoint.

    NOTE(review): corrupted by an automated rename -- locals are all bound to
    ``a__`` so later references (``pipe``, ``output``, ``image``, ``mem_bytes``)
    are undefined, and several call sites pass the unbound name ``snake_case``.
    """

    def _snake_case ( self ) -> List[str]:
        """Free GPU memory between tests (written as a tearDown before the rename)."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> Tuple:
        """Full text-to-image run should match the stored reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )

    def _snake_case ( self ) -> Tuple:
        """Offloaded pipeline should stay under ~7 GB of peak GPU memory."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __lowerCAmelCase :
    """Config/input builder and past-key-values consistency checker for TF Blenderbot
    tests.

    NOTE(review): corrupted by an automated rename -- the three class attributes all
    share the name ``_UpperCamelCase`` (so only ``"gelu"`` survives and
    ``self.config_cls`` / ``self.config_updates`` no longer exist), and ``__init__``
    binds locals named ``a__`` instead of instance attributes, so every
    ``self.<hyperparameter>`` read below is undefined. Docstrings describe intent.
    """

    _UpperCamelCase : Tuple = BlenderbotConfig
    _UpperCamelCase : Optional[Any] = {}
    _UpperCamelCase : List[Any] = """gelu"""

    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=False , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case=0.1 , snake_case=0.1 , snake_case=20 , snake_case=2 , snake_case=1 , snake_case=0 , ) -> Optional[int]:
        """Record the model/test hyperparameters for later config construction."""
        a__ : Any = parent
        a__ : Optional[Any] = batch_size
        a__ : Tuple = seq_length
        a__ : Dict = is_training
        a__ : Union[str, Any] = use_labels
        a__ : int = vocab_size
        a__ : Any = hidden_size
        a__ : Any = num_hidden_layers
        a__ : List[Any] = num_attention_heads
        a__ : List[str] = intermediate_size
        a__ : Optional[int] = hidden_dropout_prob
        a__ : List[Any] = attention_probs_dropout_prob
        a__ : Tuple = max_position_embeddings
        a__ : List[Any] = eos_token_id
        a__ : List[str] = pad_token_id
        a__ : List[Any] = bos_token_id

    def _snake_case ( self ) -> str:
        """Build a BlenderbotConfig plus matching random encoder/decoder inputs."""
        a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force every sequence to end with the EOS token.
        a__ : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        a__ : str = tf.concat([input_ids, eos_tensor] , axis=1 )
        a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : Dict = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        a__ : Union[str, Any] = prepare_blenderbot_inputs_dict(snake_case , snake_case , snake_case )
        return config, inputs_dict

    def _snake_case ( self , snake_case , snake_case ) -> Dict:
        """Decoder with cached past_key_values must match the no-cache forward pass."""
        a__ : Optional[int] = TFBlenderbotModel(config=snake_case ).get_decoder()
        a__ : Optional[int] = inputs_dict["input_ids"]
        # Use a single example to keep the comparison cheap.
        a__ : Tuple = input_ids[:1, :]
        a__ : str = inputs_dict["attention_mask"][:1, :]
        a__ : Optional[int] = inputs_dict["head_mask"]
        a__ : Any = 1
        # first forward pass
        a__ : str = model(snake_case , attention_mask=snake_case , head_mask=snake_case , use_cache=snake_case )
        a__ , a__ : Any = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        a__ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
        a__ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        a__ : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
        a__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        a__ : int = model(snake_case , attention_mask=snake_case )[0]
        a__ : Tuple = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        a__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        a__ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
        a__ : Any = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(snake_case , snake_case , rtol=1E-3 )
def _A(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Blenderbot forward pass.

    Any mask left as ``None`` is built from the token ids (pad positions masked)
    or as an all-ones head mask.

    NOTE(review): the original duplicated ``lowerCamelCase`` for every positional
    parameter (a SyntaxError) and referenced the nonexistent dtype ``tf.inta``
    (restored to ``tf.int8``).
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Never mask the decoder-start token; mask pads in the remainder.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Restore the descriptive name used by callers in this file (model tester above).
prepare_blenderbot_inputs_dict = _A
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    # NOTE(review): the obfuscation collapsed distinct class attributes
    # (presumably all_model_classes, all_generative_model_classes,
    # pipeline_model_mapping, is_encoder_decoder, … — TODO confirm) onto the
    # single name `_UpperCamelCase`, so each assignment overwrites the previous.
    _UpperCamelCase : str = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    _UpperCamelCase : str = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    _UpperCamelCase : Dict = (
        {
            """conversational""": TFBlenderbotForConditionalGeneration,
            """feature-extraction""": TFBlenderbotModel,
            """summarization""": TFBlenderbotForConditionalGeneration,
            """text2text-generation""": TFBlenderbotForConditionalGeneration,
            """translation""": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : Any = True
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Dict = False
    # NOTE(review): all three methods share the name `_snake_case`, so only the
    # last definition survives on the class (originally likely
    # setUp / test_config / test_decoder_model_past_large_inputs).
    def _snake_case ( self ) -> Dict:
        """Create the model tester and config tester used by the test cases."""
        a__ : str = TFBlenderbotModelTester(self )
        # NOTE(review): `snake_case` is undefined here (originally the config class).
        a__ : Optional[int] = ConfigTester(self , config_class=snake_case )
    def _snake_case ( self ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> int:
        """Exercise the decoder's past-key-values path."""
        a__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        # NOTE(review): `snake_case` is undefined — the tuple above (bound to
        # `a__`) was clearly meant to be unpacked into this call.
        self.model_tester.check_decoder_model_past_large_inputs(*snake_case )
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    # Originally distinct attributes (src_text and model_name); both were
    # collapsed to `_UpperCamelCase`, so the second assignment overwrites the first.
    _UpperCamelCase : Dict = ["""My friends are cool but they eat too many carbs."""]
    _UpperCamelCase : List[Any] = """facebook/blenderbot-400M-distill"""
    # NOTE(review): the cached properties and the slow test all share the name
    # `_snake_case` (originally tokenizer / model / a test method), so earlier
    # definitions are shadowed; bodies also read `self.tokenizer`, `self.model`,
    # `self.src_text`, `model_inputs`, `generated_ids`, `snake_case`, none of
    # which exist under those names any more. Needs repair.
    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Load the Blenderbot tokenizer for the pinned checkpoint."""
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def _snake_case ( self ) -> Tuple:
        """Load the TF seq2seq model for the pinned checkpoint."""
        a__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def _snake_case ( self ) -> Optional[Any]:
        """Generate a reply for the fixture sentence and compare it to a golden string."""
        a__ : Optional[Any] = self.tokenizer(self.src_text , return_tensors="tf" )
        a__ : Optional[Any] = self.model.generate(
            model_inputs.input_ids , )
        a__ : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case )[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 629 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it provides; backend-specific entries
# are appended below only when the corresponding dependency is installed.
# NOTE(review): the original bound every backend list to a throwaway name and
# referenced an undefined `_import_structure` at the bottom, and never installed
# the lazy module into sys.modules — all restored here.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps each submodule to the public names it provides.
# NOTE(review): the original bound the torch model list to a throwaway name,
# referenced an undefined `_import_structure` at the bottom, and never installed
# the lazy module into sys.modules — all restored here.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A(iterations):
    """Estimate pi by Monte Carlo dart-throwing and print the result.

    Args:
        iterations: number of random points sampled in the unit square.

    NOTE(review): the inner function originally duplicated the parameter name
    ``lowerCamelCase`` (a SyntaxError) and the body read undefined locals;
    names reconstructed.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x, y):
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"""The estimated value of pi is {pi_estimate}""")
    print(f"""The numpy value of pi is {pi}""")
    print(f"""The total error is {abs(pi - pi_estimate)}""")


# Restore the descriptive public name used by the original module.
pi_estimator = _A
def _A(iterations, function_to_integrate, min_value=0.0, max_value=1.0):
    """Monte Carlo estimate of the integral of ``function_to_integrate`` over
    ``[min_value, max_value]``: mean sampled value times interval width.

    NOTE(review): the original duplicated ``lowerCamelCase`` for every parameter
    (a SyntaxError); the order (iterations, function, min, max) is fixed by the
    call sites below.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


# Restore the descriptive public name referenced by the checks below.
area_under_curve_estimator = _A
def _A(iterations, min_value=0.0, max_value=1.0):
    """Sanity-check the Monte Carlo area estimator on f(x) = x and print stats.

    NOTE(review): the original duplicated ``lowerCamelCase`` parameters (a
    SyntaxError) and called ``area_under_curve_estimator``, a name that no
    longer exists in this file — the estimate is inlined here instead.
    """

    def identity_function(x):
        return x

    # Inlined Monte Carlo area estimate (mean sampled value * interval width).
    estimated_value = mean(
        identity_function(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
    # Exact area under y = x between the bounds.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {expected_value}""")
    print(f"""Total error is {abs(estimated_value - expected_value)}""")
    print("******************")


# Restore the descriptive public name used by the original module.
area_under_line_estimator_check = _A
def _A(iterations):
    """Estimate pi as the area under sqrt(4 - x**2) on [0, 2] and print stats.

    NOTE(review): the original inner function read the undefined name ``x`` and
    the body called the missing ``area_under_curve_estimator`` and printed the
    undefined ``estimated_value`` — reconstructed with the estimate inlined.
    """

    def function_to_integrate(x):
        # Quarter-circle of radius 2; its area on [0, 2] equals pi.
        return sqrt(4.0 - x * x)

    # Inlined Monte Carlo area estimate over [0.0, 2.0].
    estimated_value = mean(
        function_to_integrate(uniform(0.0, 2.0)) for _ in range(iterations)
    ) * (2.0 - 0.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"""Estimated value is {estimated_value}""")
    print(f"""Expected value is {pi}""")
    print(f"""Total error is {abs(estimated_value - pi)}""")
    print("******************")


# Restore the descriptive public name used by the original module.
pi_estimator_using_area_under_curve = _A
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 629 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger shared by this feature extractor (name obfuscated;
# originally `logger`).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """Feature extractor that pads/chunks raw audio (mono or stereo) into
    fixed-size windows for a neural audio codec.

    NOTE(review): the obfuscation collapsed every parameter to ``snake_case`` —
    ``__init__`` and ``__call__`` repeat a parameter name, which is a
    SyntaxError — and the bodies read names (``chunk_length_s``, ``overlap``,
    ``padding``, ``raw_audio`` …) that are no longer bound. The comments below
    describe the evident intent; the code itself needs repair.
    """

    # Originally `model_input_names`: keys produced by __call__.
    _UpperCamelCase : str = ["""input_values""", """padding_mask"""]
    def __init__( self , snake_case = 1 , snake_case = 24_000 , snake_case = 0.0 , snake_case = None , snake_case = None , **snake_case , ) -> List[str]:
        """Store feature size, sampling rate, padding value, chunk length (s) and overlap."""
        super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
        a__ : Any = chunk_length_s
        a__ : Optional[int] = overlap
    @property
    def _snake_case ( self ) -> Optional[int]:
        """Chunk length in samples, or None when chunking is not configured."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def _snake_case ( self ) -> Optional[int]:
        """Hop size between chunks in samples (at least 1), or None when unset."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , snake_case , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , ) -> BatchFeature:
        """Normalize, batch, pad/truncate raw audio and return a BatchFeature."""
        # Reject audio recorded at a different rate than the extractor expects.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # Padding and truncation are mutually exclusive strategies.
        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one." )
        elif padding is None:
            # by default let's pad the inputs
            a__ : int = True
        # Detect whether the input is a batch (list/tuple of arrays) or a single clip.
        a__ : Any = bool(
            isinstance(snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            a__ : List[Any] = [np.asarray(snake_case , dtype=np.floataa ).T for audio in raw_audio]
        elif not is_batched and not isinstance(snake_case , np.ndarray ):
            a__ : List[str] = np.asarray(snake_case , dtype=np.floataa )
        elif isinstance(snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
            a__ : Dict = raw_audio.astype(np.floataa )
        # always return batch
        if not is_batched:
            a__ : int = [np.asarray(snake_case ).T]
        # verify inputs are valid
        for idx, example in enumerate(snake_case ):
            if example.ndim > 2:
                raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" )
        a__ : Any = None
        a__ : List[str] = BatchFeature({"input_values": raw_audio} )
        # When chunking is configured, derive max_length from the chunk grid:
        # truncate to the shortest clip's grid, or pad to the longest clip's grid.
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                a__ : int = min(array.shape[0] for array in raw_audio )
                a__ : Optional[Any] = int(np.floor(max_length / self.chunk_stride ) )
                a__ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                a__ : Any = max(array.shape[0] for array in raw_audio )
                a__ : int = int(np.ceil(max_length / self.chunk_stride ) )
                a__ : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
                a__ : Optional[int] = "max_length"
        else:
            a__ : List[Any] = input_values
        # normal padding on batch
        if padded_inputs is None:
            a__ : str = self.pad(
                snake_case , max_length=snake_case , truncation=snake_case , padding=snake_case , return_attention_mask=snake_case , )
            if padding:
                # Rename the attention mask to the codec's expected "padding_mask".
                a__ : List[Any] = padded_inputs.pop("attention_mask" )
        a__ : int = []
        # Transpose back to (length, channels) and add a channel axis for mono.
        for example in padded_inputs.pop("input_values" ):
            if self.feature_size == 1:
                a__ : List[Any] = example[..., None]
            input_values.append(example.T )
        a__ : int = input_values
        if return_tensors is not None:
            a__ : Tuple = padded_inputs.convert_to_tensors(snake_case )
        return padded_inputs
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A(old_name, num_meta4D_last_stage):
    """Translate one original EfficientFormer checkpoint key into HF naming.

    Args:
        old_name: parameter name from the original checkpoint.
        num_meta4D_last_stage: number of meta-4D blocks in the last stage,
            used to split that stage into meta4D/meta3D sub-layers.

    Returns:
        The renamed key.

    NOTE(review): the original duplicated ``lowerCamelCase`` parameters (a
    SyntaxError) and assigned every local to ``a__`` while reading ``layer``,
    ``match``, ``trimmed_name`` and ``new_name`` — names reconstructed from the
    surviving references.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        # Keys look like "patch_embed.<layer>.<param>".
        _prefix, layer, _param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            # Intermediate stages hold only meta-4D blocks.
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            # Last stage: the first blocks are meta-4D, the rest meta-3D.
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


# Restore the descriptive name used by the rest of the original conversion script.
rename_key = _A
def _A ( lowerCamelCase , lowerCamelCase ):
    # Intended to rename every key of a checkpoint state dict in place.
    # NOTE(review): the duplicated parameter name `lowerCamelCase` is a
    # SyntaxError; `checkpoint` and `val` below are undefined, and the popped
    # value is never re-inserted under a renamed key (the rename_key call was
    # lost), so the dict would only be emptied. Needs repair.
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
        a__ : Dict = val
    return checkpoint
def _A():
    """Download the standard COCO sanity-check image (two cats on a couch).

    NOTE(review): the original read the undefined name ``lowerCamelCase`` for
    both the URL variable and the ``stream`` flag (originally ``True``).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Restore the descriptive name used elsewhere in the original conversion script.
prepare_img = _A
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    # Intended entry point: load an EfficientFormer checkpoint, convert it to the
    # HF format, verify logits on a test image, save, and optionally push to hub.
    # NOTE(review): the four parameters share one name (a SyntaxError); the body
    # reads many undefined names (`checkpoint_path`, `config`, `model`,
    # `pillow_resamplings`, `image`, `processor`, `original_pixel_values`,
    # `logits`, `expected_shape`, `pytorch_dump_path`, `push_to_hub`, …) because
    # all assignments were collapsed onto `a__`. Needs a full repair pass.
    a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"]
    a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase )
    a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase )
    a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    # Number of meta-4D blocks in the final stage (rest are meta-3D).
    a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
    a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase )
    model.load_state_dict(lowerCamelCase )
    model.eval()
    a__ : Dict = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    a__ : str = prepare_img()
    a__ : Dict = 256
    a__ : Union[str, Any] = 224
    a__ : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
    # original processing pipeline
    a__ : List[str] = Compose(
        [
            Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(lowerCamelCase ),
            ToTensor(),
            Normalize(lowerCamelCase , lowerCamelCase ),
        ] )
    a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 )
    # Sanity check: HF processor must reproduce the original preprocessing.
    assert torch.allclose(lowerCamelCase , lowerCamelCase )
    a__ : Optional[int] = model(lowerCamelCase )
    a__ : Any = outputs.logits
    a__ : Optional[Any] = (1, 1000)
    # Per-variant golden logits for the first 10 classes.
    if "l1" in model_name:
        a__ : Tuple = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        a__ : int = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        a__ : Optional[Any] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
    model.save_pretrained(lowerCamelCase )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(lowerCamelCase )
    print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    # Command-line interface for the conversion script.
    SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    parser.set_defaults(push_to_hub=True)
    # NOTE(review): `parser` above is undefined (the ArgumentParser was bound to
    # `SCREAMING_SNAKE_CASE__`), and `convert_efficientformer_checkpoint` below
    # does not exist — the conversion function in this file is named `_A`.
    SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
from math import factorial
class __lowerCAmelCase:
    """Dual number for forward-mode automatic differentiation.

    ``real`` is the value; ``duals[i]`` is the coefficient of E**(i+1), where E
    is the nilpotent infinitesimal used to track derivatives.

    NOTE(review): the original duplicated the parameter name ``snake_case`` in
    every binary method (a SyntaxError), compared ``isinstance(x, x)``, and
    assigned ``__radd__``/``__rmul__`` to the throwaway name ``_UpperCamelCase``;
    names and checks reconstructed.
    """

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            # An integer rank means "track derivatives up to this order".
            self.duals = [1] * rank
        else:
            # Otherwise an explicit list of dual coefficients was supplied.
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def _snake_case(self):
        """Drop trailing zero dual coefficients (IndexError if all are zero)."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            # Adding a plain number only shifts the real part.
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list (original semantics: pad with 1s).
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Polynomial-style product of the dual parts (E**k coefficients).
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        # Division by another dual number is not supported.
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


# Module-level alias: the methods above and `differentiate` below refer to `Dual`
# (the double-underscore class name would be mangled inside method bodies).
Dual = __lowerCAmelCase
def _A(func, position, order):
    """Compute the ``order``-th derivative of ``func`` at ``position`` using dual numbers.

    Args:
        func: a callable built from arithmetic the ``Dual`` class supports.
        position: point at which to differentiate (int or float).
        order: derivative order (non-negative int; 0 returns the value itself).

    Raises:
        ValueError: if the inputs have the wrong types.

    NOTE(review): the original duplicated ``lowerCamelCase`` parameters (a
    SyntaxError), checked ``isinstance(x, x)``, and read the undefined names
    ``order``/``result``; reconstructed here.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    # Seed a rank-1 dual number at the evaluation point and propagate it.
    seed = Dual(position, 1)
    result = func(seed)
    if order == 0:
        return result.real
    # The E**order coefficient times order! is the order-th derivative.
    return result.duals[order - 1] * factorial(order)


# Restore the descriptive public name used by the original module.
differentiate = _A
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def _example(y):
        """Example function f(y) = y**6 whose second derivative is evaluated below.

        NOTE(review): the original body read the undefined name ``y`` (its
        parameter was ``lowerCamelCase``) and the print below called the
        undefined names ``differentiate`` and ``f``; ``_A`` is the
        differentiate function defined above.
        """
        return y**2 * y**4

    print(_A(_example, 9, 2))
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Tokenizer resource maps for the LXMERT fast tokenizer.
# NOTE(review): originally four distinct constants (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# PRETRAINED_INIT_CONFIGURATION); the obfuscation collapsed them onto one
# name, so only the last assignment survives at runtime.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (tokenizers-backed) LXMERT tokenizer, WordPiece-based like BERT.

    NOTE(review): the obfuscation collapsed the class attributes (originally
    vocab_files_names, pretrained_vocab_files_map, … — TODO confirm) onto
    `_UpperCamelCase`, duplicated the `snake_case` parameter name in several
    signatures (a SyntaxError), and gave all methods the same name
    `_snake_case`, so earlier definitions are shadowed. Needs repair.
    """
    _UpperCamelCase : List[str] = VOCAB_FILES_NAMES
    _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : Optional[Any] = LxmertTokenizer
    def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ) -> Any:
        """Build the fast tokenizer and re-sync the normalizer with the given
        lowercase/strip-accents/Chinese-chars options if they differ."""
        super().__init__(
            snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
        # Rebuild the backend normalizer only when its state disagrees with the
        # requested options.
        a__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , snake_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
        ):
            a__ : str = getattr(snake_case , normalizer_state.pop("type" ) )
            a__ : Tuple = do_lower_case
            a__ : Union[str, Any] = strip_accents
            a__ : str = tokenize_chinese_chars
            a__ : List[str] = normalizer_class(**snake_case )
        a__ : str = do_lower_case
    def _snake_case ( self , snake_case , snake_case=None ) -> List[str]:
        """Build model inputs with special tokens: [CLS] A [SEP] (B [SEP])."""
        a__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
        """Token-type ids: zeros for the first segment, ones for the second."""
        a__ : Tuple = [self.sep_token_id]
        a__ : str = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
        """Save the tokenizer vocabulary files to a directory."""
        a__ : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
        return tuple(snake_case )
| 629 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Hub checkpoint ids for the four Stable Diffusion v1.x variants being compared.
# NOTE(review): originally four distinct constants (pipe1_model_id …
# pipe4_model_id); collapsed onto one name, so only the v1-4 id survives.
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
"""simple docstring"""
super()._init_()
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : int = StableDiffusionPipeline(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
self.to(snake_case )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
a__ : Any = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.2
a__ : List[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.3
a__ : Optional[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.4
a__ : Dict = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger (transformers logging wrapper).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Map of pretrained MobileNetV2 checkpoints to their hosted config files.
# NOTE(review): obfuscation reused the same constant name as the logger
# above, so this assignment shadows it — the original used two distinct
# names (logger and the archive map). Restore before use.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for MobileNetV2 models.

    NOTE(review): obfuscation collapsed every ``__init__`` keyword to the
    same name ``snake_case`` (duplicate arguments are a SyntaxError) while
    the body still reads the original parameter names (``num_channels``,
    ``image_size``, ``depth_multiplier``, ...). The original signature must
    be restored before this class can run.
    """

    # Model-type key used by the auto-config machinery.
    _UpperCamelCase : Optional[int] = """mobilenet_v2"""

    def __init__( self , snake_case=3 , snake_case=224 , snake_case=1.0 , snake_case=8 , snake_case=8 , snake_case=6 , snake_case=32 , snake_case=True , snake_case=True , snake_case="relu6" , snake_case=True , snake_case=0.8 , snake_case=0.02 , snake_case=0.001 , snake_case=255 , **snake_case , ) -> int:
        """Store all architecture hyper-parameters; extra kwargs go to the
        PretrainedConfig base class."""
        super().__init__(**snake_case )
        # A non-positive width multiplier would produce an empty network.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        a__ : str = num_channels
        a__ : Dict = image_size
        a__ : Any = depth_multiplier
        a__ : str = depth_divisible_by
        a__ : Optional[int] = min_depth
        a__ : Dict = expand_ratio
        a__ : str = output_stride
        a__ : Optional[int] = first_layer_is_expansion
        a__ : Union[str, Any] = finegrained_output
        a__ : Union[str, Any] = hidden_act
        a__ : str = tf_padding
        a__ : List[Any] = classifier_dropout_prob
        a__ : List[Any] = initializer_range
        a__ : Optional[Any] = layer_norm_eps
        a__ : str = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for MobileNetV2.

    Declares the dynamic input/output axes used during export and the
    absolute tolerance for validating the exported model.

    NOTE(review): the three properties were all renamed to ``_snake_case``
    by obfuscation (originally ``inputs`` / ``outputs`` /
    ``atol_for_validation``), so the later definitions shadow the earlier
    ones in the class namespace.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    _UpperCamelCase : Any = version.parse("1.11")

    @property
    def _snake_case(self) -> Mapping[str, Mapping[int, str]]:
        """Input axes: a single ``pixel_values`` tensor with a dynamic batch dim."""
        return OrderedDict({"pixel_values": {0: "batch"}})

    @property
    def _snake_case(self) -> Mapping[str, Mapping[int, str]]:
        """Output axes, which depend on the export task."""
        if self.task == "image-classification":
            return OrderedDict({"logits": {0: "batch"}})
        return OrderedDict({"last_hidden_state": {0: "batch"}, "pooler_output": {0: "batch"}})

    @property
    def _snake_case(self) -> float:
        """Absolute tolerance used when validating exported outputs."""
        return 1e-4
| 629 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch/CUDA kernels deterministic so pipeline test outputs are reproducible.
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast pipeline tests for ``StableUnCLIPPipeline`` built from tiny
    random components.

    NOTE(review): obfuscation damage — ``embedder_hidden_size`` and
    ``embedder_projection_dim`` are read below but their assignments were
    renamed to ``a__`` (NameErrors); the class attributes were all renamed
    to ``_UpperCamelCase`` (later ones shadow earlier); and
    ``def _snake_case(self, snake_case, snake_case=0)`` repeats a parameter
    name (SyntaxError). Restore the original identifiers before running.
    """

    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False

    def _snake_case ( self ) -> List[str]:
        """Build the dictionary of tiny pipeline components (presumably the
        original ``get_dummy_components``)."""
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic call kwargs (presumably ``get_dummy_inputs``)."""
        # MPS requires the global seed; other devices use a device generator.
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self ) -> List[str]:
        """Attention-slicing equivalence check (exact comparison only on CPU)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )

    def _snake_case ( self ) -> int:
        """Batch-of-one equivalence check (exact comparison only on CPU/MPS)."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for the ``fusing/stable-unclip-2-1-l``
    checkpoint.

    NOTE(review): obfuscation damage — both test methods and the teardown
    were renamed to ``_snake_case`` (later definitions shadow earlier ones),
    and the bodies read the free name ``snake_case`` where ``torch_device``,
    ``None``, or the expected image were intended (NameErrors at run time).
    Several locals (``pipe``, ``output``, ``image``, ``mem_bytes``) are read
    after their assignments were renamed to ``a__``.
    """

    def _snake_case ( self ) -> List[str]:
        """Release GPU memory between tests (originally ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> Tuple:
        """Compare a generated image against a stored reference array."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )

    def _snake_case ( self ) -> Tuple:
        """Check peak GPU memory stays under 7 GB with offloading enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
    """Recursively collect the ``.shape`` of every tensor leaf in a nested
    tree of dicts / lists / tuples / ``torch.Tensor``s.

    Args:
        lowerCamelCase: the (possibly nested) container or tensor to inspect.

    Returns:
        A flat list of ``torch.Size`` objects, one per tensor leaf, in
        traversal order.

    Raises:
        ValueError: if a node is not a dict, list, tuple or tensor.
    """
    # BUG FIX: the original tested ``isinstance(x, x)`` (TypeError for any
    # non-type argument) and read the pre-obfuscation names ``tree``,
    # ``shapes`` and ``_fetch_dims`` (NameErrors). Restored to the canonical
    # recursive traversal using the actual parameter.
    a__ : List[str] = []
    if isinstance(lowerCamelCase , dict ):
        for v in lowerCamelCase.values():
            a__.extend(_A(v ) )
    elif isinstance(lowerCamelCase , (list, tuple) ):
        for t in lowerCamelCase:
            a__.extend(_A(t ) )
    elif isinstance(lowerCamelCase , torch.Tensor ):
        a__.append(lowerCamelCase.shape )
    else:
        raise ValueError("Not supported" )
    return a__
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
    """Convert a flat (row-major) index into a multi-dimensional index for
    the given dimension sizes.

    NOTE(review): obfuscation gave both parameters the name
    ``lowerCamelCase`` (duplicate arguments are a SyntaxError) while the
    body still reads the original names ``idx`` and ``flat_idx``; restore
    the original signature ``(flat_idx, dims)`` and its locals before use.
    """
    a__ : List[str] = []
    # Peel off the least-significant dimension each iteration.
    for d in reversed(lowerCamelCase ):
        idx.append(flat_idx % d )
        a__ : Union[str, Any] = flat_idx // d
    return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
    """Compute a minimal set of slice tuples that exactly covers the
    inclusive multi-dimensional range [start, end] within a tensor of the
    given dims (OpenFold-style ``_get_minimal_slice_set``).

    NOTE(review): obfuscation damage — all positional parameters share the
    name ``lowerCamelCase`` (duplicate arguments are a SyntaxError), and the
    body reads many pre-obfuscation names (``start``, ``end``, ``dims``,
    ``start_edges``, ``end_edges``, ``l``, ``tally``, ``reversed_idx``,
    ``path_list``, ``path``, ``slices``, ``divergence_idx``, ``sdi``,
    ``edi``, ``middle_ground``, ``_get_minimal_slice_set``) whose
    assignments were renamed to ``a__``. The original identifiers must be
    restored before this function can run.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(lowerCamelCase ) -> None:
        # Propagate "at the edge" flags from the innermost dimension outward.
        a__ : int = True
        for i in range(len(lowerCamelCase ) ):
            a__ : Optional[Any] = -1 * (i + 1)
            l[reversed_idx] &= tally
            a__ : Tuple = l[reversed_idx]
    if start_edges is None:
        a__ : Optional[int] = [s == 0 for s in start]
        reduce_edge_list(lowerCamelCase )
    if end_edges is None:
        a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
        reduce_edge_list(lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(lowerCamelCase ) == 0:
        return [()]
    elif len(lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    a__ : List[Tuple[slice, ...]] = []
    a__ : List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(lowerCamelCase , lowerCamelCase ):
        if s == e:
            path_list.append(slice(lowerCamelCase , s + 1 ) )
        else:
            break
    a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
    a__ : Optional[Any] = len(lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(lowerCamelCase ):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering everything from `start` to the top edge of the
        # subtree at the divergence dimension.
        assert start_edges is not None
        assert end_edges is not None
        a__ : Optional[Any] = start[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Mirror of upper(): slices from the bottom edge of the subtree up
        # to `end`.
        assert start_edges is not None
        assert end_edges is not None
        a__ : List[str] = end[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    """Select the flat-index range [flat_start, flat_end) from a tensor's
    leading batch dimensions using a minimal set of slices, returning the
    concatenated, flattened result (OpenFold-style ``_chunk_slice``).

    NOTE(review): obfuscation damage — all four parameters share the name
    ``lowerCamelCase`` (duplicate arguments are a SyntaxError) and the body
    reads the pre-obfuscation names (``t``, ``no_batch_dims``, ``flat_end``,
    ``slices``, ``sliced_tensors``, ``_flat_idx_to_idx``,
    ``_get_minimal_slice_set``). Restore the original signature
    ``(t, flat_start, flat_end, no_batch_dims)`` before use.
    """
    a__ : Optional[int] = t.shape[:no_batch_dims]
    a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
    # _get_minimal_slice_set is inclusive
    a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
    # Get an ordered list of slices to perform
    a__ : str = _get_minimal_slice_set(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , )
    a__ : Any = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
    """Apply a layer over its inputs in chunks along the flattened batch
    dimensions, to bound peak memory (OpenFold-style ``chunk_layer``).

    NOTE(review): obfuscation damage — all parameters share the name
    ``lowerCamelCase`` (duplicate arguments are a SyntaxError) and the body
    reads the pre-obfuscation names (``layer``, ``chunk_size``,
    ``no_batch_dims``, ``low_mem``, ``_out``, ``_add_into_out``,
    ``orig_batch_dims``, ``prepped_outputs``, ``flat_batch_dim``, ``out``,
    ``output_chunk``, ``t``, ``i``, ``da``, ``xa``, ``_fetch_dims``,
    ``_chunk_slice``) whose bindings were renamed to ``a__``. Restore the
    original signature and locals before use.
    """
    if not (len(lowerCamelCase ) > 0):
        raise ValueError("Must provide at least one input" )
    a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
    # Broadcast target: the element-wise max over all inputs' batch shapes.
    a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
    def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
        # Expand every input to the common batch shape; flatten the batch
        # dims unless running in low-memory mode.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
    a__ : str = None
    if _out is not None:
        a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    a__ : Optional[Any] = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Number of chunks, rounding up for a ragged final chunk.
    a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(lowerCamelCase ) -> torch.Tensor:
        # Size-1 (broadcast) leading dims are shared across chunks, not sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    a__ : str = 0
    a__ : Any = prepped_outputs
    for _ in range(lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            a__ : str = _select_chunk
        else:
            a__ : Tuple = partial(
                _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
        a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
        # Run the layer on the chunk
        a__ : Any = layer(**lowerCamelCase )
        # Allocate space for the output
        if out is None:
            a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(lowerCamelCase , lowerCamelCase ):
            def assign(lowerCamelCase , lowerCamelCase ) -> None:
                # Recursively copy (or accumulate) chunk outputs into ``out``.
                for k, v in da.items():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        assign(lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            a__ : Dict = da[k]
            assign(lowerCamelCase , lowerCamelCase )
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    a__ : Dict = xa
        elif isinstance(lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                a__ : Dict = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    # Restore the original (unflattened) batch dimensions on the output.
    a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
    return out
class __lowerCAmelCase :
    """Heuristically tunes the largest chunk size that runs without OOM for
    chunked layer execution, caching the result per argument signature.

    NOTE(review): obfuscation damage throughout — the three methods were all
    renamed to ``_snake_case`` (later defs shadow earlier ones) yet the last
    method still calls ``self._compare_arg_caches`` /
    ``self._determine_favorable_chunk_size``; locals were renamed to ``a__``
    while later reads keep the original names (``candidates``, ``viable``,
    ``min_viable_chunk_size_index``, ``consistent``, ``arg_data``, ``fn``,
    ``max_chunk_size``); and the dict sort keys ``lambda snake_case : x[0]``
    read ``x`` instead of the lambda's parameter. Restore the original
    identifiers before use.
    """

    def __init__( self , snake_case = 512 , ) -> List[str]:
        """Record the maximum chunk size to consider; caches start empty."""
        a__ : int = max_chunk_size
        a__ : Optional[int] = None
        a__ : Optional[tuple] = None

    def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
        """Binary-search the largest power-of-two chunk size that the given
        callable can execute without raising (e.g. CUDA OOM)."""
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        a__ : List[str] = [c for c in candidates if c > min_chunk_size]
        a__ : Optional[int] = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(snake_case ) -> bool:
            # A candidate is viable iff the layer runs without a RuntimeError.
            try:
                with torch.no_grad():
                    fn(*snake_case , chunk_size=snake_case )
                return True
            except RuntimeError:
                return False
        a__ : Union[str, Any] = 0
        a__ : Dict = len(snake_case ) - 1
        while i > min_viable_chunk_size_index:
            a__ : Any = test_chunk_size(candidates[i] )
            if not viable:
                # Shrink toward the last known-viable candidate.
                a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
            else:
                a__ : Tuple = i
                a__ : Any = (i + len(snake_case ) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _snake_case ( self , snake_case , snake_case ) -> bool:
        """Structurally compare two cached argument trees for equality."""
        a__ : str = True
        for aa, aa in zip(snake_case , snake_case ):
            assert type(snake_case ) == type(snake_case )
            if isinstance(snake_case , (list, tuple) ):
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            elif isinstance(snake_case , snake_case ):
                # Compare dict values in key order.
                a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            else:
                consistent &= aa == aa
        return consistent

    def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
        """Return a chunk size, re-tuning only when the argument shapes or
        values changed since the last call."""
        a__ : List[Any] = True
        # Replace tensors by their shapes so the cache key is cheap to compare.
        a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(snake_case )
            a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
        else:
            # Otherwise, we can reuse the precomputed value
            a__ : Optional[int] = False
        if not consistent:
            a__ : List[str] = self._determine_favorable_chunk_size(
                snake_case , snake_case , snake_case , )
            a__ : List[str] = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 1 |
import math
def _A ( lowerCamelCase , lowerCamelCase ):
    """Jump search for ``x`` in a sorted sequence ``arr``; returns the index
    of the match or -1 when absent.

    NOTE(review): obfuscation damage — both parameters share the name
    ``lowerCamelCase`` (duplicate arguments are a SyntaxError) and every
    local assignment was renamed to ``a__`` while the reads keep the
    original names (``arr``, ``n``, ``x``, ``step``, ``prev``), so nothing
    here can resolve. Restore the original signature ``(arr, x)`` and its
    locals before use.
    """
    a__ : Optional[Any] = len(lowerCamelCase )
    # Block size is the canonical floor(sqrt(n)).
    a__ : int = int(math.floor(math.sqrt(lowerCamelCase ) ) )
    a__ : str = 0
    # Jump forward block by block until the block that may contain x.
    while arr[min(lowerCamelCase , lowerCamelCase ) - 1] < x:
        a__ : Dict = step
        step += int(math.floor(math.sqrt(lowerCamelCase ) ) )
        if prev >= n:
            return -1
    # Linear scan within the block.
    while arr[prev] < x:
        a__ : List[Any] = prev + 1
        if prev == min(lowerCamelCase , lowerCamelCase ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    # Demo driver for the jump search above.
    # NOTE(review): obfuscation renamed every local to the same constant name
    # (each assignment shadows the previous) while the reads below still use
    # the original names ``user_input``, ``arr``, ``x``, ``res``; the
    # function ``jump_search`` was renamed to ``_A``. Restore the original
    # names before running.
    SCREAMING_SNAKE_CASE__ : Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
    SCREAMING_SNAKE_CASE__ : Optional[int] = int(input("""Enter the number to be searched:\n"""))
    SCREAMING_SNAKE_CASE__ : Any = jump_search(arr, x)
    if res == -1:
        print("""Number not found!""")
    else:
        print(f'Number {x} is at index {res}')
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for UperNet semantic-segmentation models.

    NOTE(review): obfuscation collapsed every ``__init__`` keyword to
    ``snake_case`` (duplicate arguments are a SyntaxError) while the body
    still reads the original names (``backbone_config``, ``hidden_size``,
    ...), and the dict-handling branch tests ``isinstance(snake_case,
    snake_case)`` where a type was intended. Restore the original signature
    before use. Also note the mutable default ``[1, 2, 3, 6]`` for the pool
    scales, inherited from the upstream file.
    """

    # Model-type key used by the auto-config machinery.
    _UpperCamelCase : int = """upernet"""

    def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
        """Build the config, defaulting the backbone to a 4-stage ResNet."""
        super().__init__(**snake_case )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(snake_case , snake_case ):
            # Accept a plain dict and re-hydrate it into its config class.
            a__ : Optional[int] = backbone_config.get("model_type" )
            a__ : str = CONFIG_MAPPING[backbone_model_type]
            a__ : str = config_class.from_dict(snake_case )
        a__ : int = backbone_config
        a__ : Optional[Any] = hidden_size
        a__ : Optional[Any] = initializer_range
        a__ : Tuple = pool_scales
        a__ : Optional[Any] = use_auxiliary_head
        a__ : Optional[Any] = auxiliary_loss_weight
        a__ : Dict = auxiliary_in_channels
        a__ : Optional[int] = auxiliary_channels
        a__ : Any = auxiliary_num_convs
        a__ : Any = auxiliary_concat_input
        a__ : int = loss_ignore_index

    def _snake_case ( self ) -> str:
        """Serialize this config (including the nested backbone config) to a
        plain dict.

        NOTE(review): the original stored into ``output``, which obfuscation
        renamed to ``a__`` — ``return output`` is now a NameError.
        """
        a__ : Tuple = copy.deepcopy(self.__dict__ )
        a__ : Optional[Any] = self.backbone_config.to_dict()
        a__ : List[Any] = self.__class__.model_type
        return output
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the MobileNetV2 subpackage: the structure dict
# maps submodule names to their public symbols, and _LazyModule defers the
# actual imports until first attribute access.
# NOTE(review): obfuscation damage — the structure dict and the optional
# feature-extractor/image-processor/model symbol lists were all renamed to
# SCREAMING_SNAKE_CASE__ (each assignment shadows the previous and nothing is
# inserted into the dict), so ``_import_structure`` at the bottom is an
# undefined name. The TYPE_CHECKING imports also target ``*_mobilenet_va``
# modules and ``MobileNetVa*`` symbols that conflict with the V2 names in
# the structure dict. Restore the original identifiers before use.
SCREAMING_SNAKE_CASE__ : Dict = {
    """configuration_mobilenet_v2""": [
        """MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """MobileNetV2Config""",
        """MobileNetV2OnnxConfig""",
    ],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras present: expose the feature extractor / image processor.
    SCREAMING_SNAKE_CASE__ : Tuple = ["""MobileNetV2FeatureExtractor"""]
    SCREAMING_SNAKE_CASE__ : Optional[int] = ["""MobileNetV2ImageProcessor"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the modeling symbols.
    SCREAMING_SNAKE_CASE__ : str = [
        """MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MobileNetV2ForImageClassification""",
        """MobileNetV2ForSemanticSegmentation""",
        """MobileNetV2Model""",
        """MobileNetV2PreTrainedModel""",
        """load_tf_weights_in_mobilenet_v2""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mobilenet_va import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetVaConfig,
        MobileNetVaOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
        from .image_processing_mobilenet_va import MobileNetVaImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_va import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetVaForImageClassification,
            MobileNetVaForSemanticSegmentation,
            MobileNetVaModel,
            MobileNetVaPreTrainedModel,
            load_tf_weights_in_mobilenet_va,
        )
else:
    # At runtime, replace this module with a lazily-importing proxy.
    import sys
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map interpolation-mode names to PIL resampling filters. Pillow >= 9.1.0
# moved the filters under the ``Image.Resampling`` enum (the bare constants
# are deprecated there), so the mapping is chosen at import time based on the
# installed Pillow version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def _A ( lowerCamelCase ):
    """Denormalize a torch image batch from [-1, 1] to [0, 1], move it to
    CPU as an (N, H, W, C) float numpy array, and convert it to PIL images.

    NOTE(review): obfuscation damage — the body reads ``images`` (the
    pre-obfuscation parameter name) instead of ``lowerCamelCase``, discards
    each intermediate result (everything is assigned to ``a__``), and calls
    ``numpy_to_pil``, a helper that was itself renamed to ``_A`` below.
    Restore the original names before use.
    """
    a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
    a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    a__ : int = numpy_to_pil(lowerCamelCase )
    return images
def _A ( lowerCamelCase ):
    """Convert a numpy image batch with float values in [0, 1] to a list of
    PIL images.

    Args:
        lowerCamelCase: array of shape (N, H, W, C) or a single (H, W, C)
            image; single-channel (C == 1) arrays are converted in
            grayscale ("L") mode.

    Returns:
        A list with one ``PIL.Image.Image`` per batch element.
    """
    # BUG FIX: the body referenced the pre-obfuscation name ``images``
    # (NameError) and dropped the single-image batch expansion (its result
    # was assigned to a throwaway local). Both are restored here using the
    # actual parameter.
    if lowerCamelCase.ndim == 3:
        # Promote a single (H, W, C) image to a batch of one.
        a__ : Tuple = lowerCamelCase[None, ...]
    else:
        a__ : Tuple = lowerCamelCase
    # Scale to 8-bit pixel values.
    a__ : Dict = (a__ * 255).round().astype("uint8" )
    if a__.shape[-1] == 1:
        # special case for grayscale (single channel) images
        a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in a__]
    else:
        a__ : List[Any] = [Image.fromarray(image ) for image in a__]
    return a__
| 629 | 1 |
# NOTE(review): obfuscation damage across this section — all four ideal-gas
# helpers were renamed to the same ``_A`` (later definitions shadow earlier
# ones), every signature repeats ``lowerCamelCase`` (duplicate arguments are
# a SyntaxError), and each body reads the pre-obfuscation parameter names
# (``moles``, ``volume``, ``nfactor``, ``temperature``, ``pressure``).
# Restore distinct function names and the original signatures before use.
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    # Concentration-style helper: (moles / volume) * factor, rounded.
    return round(float(moles / volume ) * nfactor )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    # Ideal-gas pressure: n * R * T / V with R = 0.0821 L·atm/(mol·K).
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    # Ideal-gas volume: n * R * T / P.
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    # Ideal-gas temperature: P * V / (n * R).
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
    import doctest
    # Run any doctest examples when executed as a script.
    doctest.testmod()
| 629 |
# Lint as: python3
import itertools
import os
import re
# Regex helpers for converting dataset names between CamelCase/snake_case and
# for validating split names / filenames.
# NOTE(review): obfuscation renamed all six constants to variants of the same
# name, so each assignment shadows the previous one; the functions below
# still read the original names (_uppercase_uppercase_re,
# _lowercase_uppercase_re, _single_underscore_re, _multiple_underscores_re,
# _split_re) — restore them before use.
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
    """Convert a CamelCase dataset name to snake_case.

    BUG FIX: the original read module-level compiled patterns
    (``_uppercase_uppercase_re`` / ``_lowercase_uppercase_re``) and the old
    parameter name ``name`` — all renamed away by obfuscation, leaving
    NameErrors. The same patterns are applied inline here and the actual
    parameter is used.
    """
    # "HTTPServer" -> "HTTP_Server": separate a trailing acronym letter from
    # the following capitalized word.
    a__ : List[str] = re.sub(r"([A-Z]+)([A-Z][a-z])" , r"\1_\2" , lowerCamelCase )
    # "DatasetName" -> "Dataset_Name": separate a lowercase/digit from a
    # following capital.
    a__ : Dict = re.sub(r"([a-z\d])([A-Z])" , r"\1_\2" , a__ )
    return a__.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( name ):
    """Return the snake_case filename prefix for dataset ``name``.

    Raises:
        ValueError: if ``name`` contains path separators.

    Fix: the parameter was named ``lowerCamelCase`` while the body read the
    undefined name ``name``.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    # NOTE(review): `camelcase_to_snakecase` is not defined under that name in
    # this file (helpers were renamed to `_A`) — confirm against upstream.
    return camelcase_to_snakecase(name )
def _A ( name , split ):
    """Return ``"<dataset_prefix>-<split>"`` after validating both parts.

    Raises:
        ValueError: if ``name`` contains path separators or ``split`` is not a
            dot-separated sequence of word characters.

    Fix: the original declared two parameters both named ``lowerCamelCase``
    (a SyntaxError) and referenced the undefined constant ``_split_re``; the
    pattern is inlined here.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    # Split names are restricted to dot-separated runs of word characters.
    _split_re = R"""^\w+(\.\w+)*$"""
    if not re.match(_split_re , split ):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
    # NOTE(review): `filename_prefix_for_name` is not defined under that name
    # in this file (helpers were renamed to `_A`) — confirm against upstream.
    return F"""{filename_prefix_for_name(name )}-{split}"""
def _A ( path , name , split , filetype_suffix=None ):
    """Return the glob pattern matching every shard file of a dataset split.

    Fix: the original declared four parameters all named ``lowerCamelCase``
    (a SyntaxError); meaningful names restore the intended dataflow.
    """
    # NOTE(review): `filename_prefix_for_split` is not defined under that name
    # in this file (helpers were renamed to `_A`) — confirm against upstream.
    prefix = filename_prefix_for_split(name , split )
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(path , prefix )
    return F"""{filepath}*"""
def _A ( path , name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the concrete shard filename(s) for a dataset split.

    With ``shard_lengths`` this yields one ``-SSSSS-of-NNNNN`` name per shard;
    otherwise a single-element list containing the plain prefix.

    Fix: the original declared five parameters all named ``lowerCamelCase``
    (a SyntaxError); meaningful names restore the intended dataflow.
    """
    # NOTE(review): `filename_prefix_for_split` is not defined under that name
    # in this file (helpers were renamed to `_A`) — confirm against upstream.
    prefix = filename_prefix_for_split(name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    # Test suite for BlipaProcessor (a BLIP image processor paired with a GPT-2
    # tokenizer).
    # NOTE(review): every method is named `_snake_case`, so each definition
    # clobbers the previous one, and locals were obfuscated to `a__`/`snake_case`
    # so bodies reference undefined names (processor, image_inputs, ...) —
    # confirm against the upstream transformers test file.
    def _snake_case ( self ) -> Tuple:
        """Create a temp dir containing a saved processor fixture."""
        a__ : Tuple = tempfile.mkdtemp()
        a__ : Optional[int] = BlipImageProcessor()
        a__ : Any = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        a__ : Tuple = BlipaProcessor(snake_case , snake_case )
        processor.save_pretrained(self.tmpdirname )
    def _snake_case ( self , **snake_case ) -> List[Any]:
        """Reload the tokenizer half of the saved processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).tokenizer
    def _snake_case ( self , **snake_case ) -> List[str]:
        """Reload the image-processor half of the saved processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor
    def _snake_case ( self ) -> Any:
        """Remove the temp dir fixture."""
        shutil.rmtree(self.tmpdirname )
    def _snake_case ( self ) -> Any:
        """Build a one-element list of random RGB PIL images."""
        # NOTE(review): `np.uinta` looks like an obfuscated `np.uint8` — confirm.
        a__ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        a__ : int = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def _snake_case ( self ) -> List[str]:
        """Round-trip save/load with extra kwargs overriding components."""
        a__ : Tuple = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        a__ : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        a__ : int = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
        a__ : Optional[int] = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , snake_case )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , snake_case )
    def _snake_case ( self ) -> Dict:
        """Processor image output must match the bare image processor."""
        a__ : Dict = self.get_image_processor()
        a__ : Any = self.get_tokenizer()
        a__ : str = BlipaProcessor(tokenizer=snake_case , image_processor=snake_case )
        a__ : str = self.prepare_image_inputs()
        a__ : Dict = image_processor(snake_case , return_tensors="np" )
        a__ : Optional[int] = processor(images=snake_case , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def _snake_case ( self ) -> Optional[Any]:
        """Processor text output must match the bare tokenizer."""
        a__ : Union[str, Any] = self.get_image_processor()
        a__ : int = self.get_tokenizer()
        a__ : Union[str, Any] = BlipaProcessor(tokenizer=snake_case , image_processor=snake_case )
        a__ : Optional[int] = "lower newer"
        a__ : Dict = processor(text=snake_case )
        a__ : str = tokenizer(snake_case , return_token_type_ids=snake_case )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _snake_case ( self ) -> Any:
        """Text+image call returns the expected keys; empty call raises."""
        a__ : Union[str, Any] = self.get_image_processor()
        a__ : Dict = self.get_tokenizer()
        a__ : Union[str, Any] = BlipaProcessor(tokenizer=snake_case , image_processor=snake_case )
        a__ : List[Any] = "lower newer"
        a__ : Any = self.prepare_image_inputs()
        a__ : Any = processor(text=snake_case , images=snake_case )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
        # test if it raises when no input is passed
        with pytest.raises(snake_case ):
            processor()
    def _snake_case ( self ) -> int:
        """batch_decode must delegate to the tokenizer's batch_decode."""
        a__ : Union[str, Any] = self.get_image_processor()
        a__ : Union[str, Any] = self.get_tokenizer()
        a__ : List[Any] = BlipaProcessor(tokenizer=snake_case , image_processor=snake_case )
        a__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        a__ : List[Any] = processor.batch_decode(snake_case )
        a__ : int = tokenizer.batch_decode(snake_case )
        self.assertListEqual(snake_case , snake_case )
    def _snake_case ( self ) -> List[str]:
        """model_input_names of the processor match its components."""
        a__ : Any = self.get_image_processor()
        a__ : Optional[int] = self.get_tokenizer()
        a__ : Optional[Any] = BlipaProcessor(tokenizer=snake_case , image_processor=snake_case )
        a__ : Tuple = "lower newer"
        a__ : Dict = self.prepare_image_inputs()
        a__ : str = processor(text=snake_case , images=snake_case )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map of submodule name -> public symbols, used to build the lazy module below.
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: the modeling symbols are simply not registered.
    pass
else:
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is undefined in this file — the
    # structure dict above was renamed to SCREAMING_SNAKE_CASE__ — confirm
    # against the upstream transformers `__init__`.
    SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __lowerCAmelCase :
    # Model tester that builds a tiny FlaubertConfig plus random inputs and
    # checks output shapes of every TF Flaubert head.
    # NOTE(review): every method is named `_snake_case` (each definition
    # clobbers the previous one) and locals were obfuscated to `a__`, so bodies
    # reference undefined names — confirm against the upstream test file.
    def __init__( self , snake_case , ) -> Optional[Any]:
        """Store the parent test case and hard-coded tiny-model hyperparameters."""
        a__ : int = parent
        a__ : Optional[int] = 13
        a__ : Tuple = 7
        a__ : Union[str, Any] = True
        a__ : Dict = True
        a__ : Union[str, Any] = True
        a__ : int = True
        a__ : str = True
        a__ : str = False
        a__ : Tuple = False
        a__ : str = False
        a__ : Any = 2
        a__ : Optional[int] = 99
        a__ : Any = 0
        a__ : Tuple = 32
        a__ : List[str] = 2
        a__ : Optional[int] = 4
        a__ : List[str] = 0.1
        a__ : str = 0.1
        a__ : Tuple = 512
        a__ : Any = 16
        a__ : Optional[Any] = 2
        a__ : Dict = 0.02
        a__ : Dict = 3
        a__ : List[str] = 4
        a__ : Tuple = "last"
        a__ : Any = True
        a__ : List[Any] = None
        a__ : Union[str, Any] = 0
    def _snake_case ( self ) -> Dict:
        """Build a FlaubertConfig and random input/label tensors for the tests."""
        a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : int = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        a__ : List[str] = None
        if self.use_input_lengths:
            a__ : str = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        a__ : str = None
        if self.use_token_type_ids:
            a__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        a__ : Union[str, Any] = None
        a__ : Dict = None
        a__ : Dict = None
        if self.use_labels:
            a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a__ : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            a__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
        a__ : List[str] = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> int:
        """Check TFFlaubertModel output shape on both dict and list inputs."""
        a__ : Optional[Any] = TFFlaubertModel(config=snake_case )
        a__ : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        a__ : Dict = model(snake_case )
        a__ : List[str] = [input_ids, input_mask]
        a__ : str = model(snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Optional[Any]:
        """Check TFFlaubertWithLMHeadModel logits shape."""
        a__ : int = TFFlaubertWithLMHeadModel(snake_case )
        a__ : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        a__ : Optional[Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> List[str]:
        """Check TFFlaubertForQuestionAnsweringSimple start/end logits shapes."""
        a__ : str = TFFlaubertForQuestionAnsweringSimple(snake_case )
        a__ : str = {"input_ids": input_ids, "lengths": input_lengths}
        a__ : Any = model(snake_case )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> str:
        """Check TFFlaubertForSequenceClassification logits shape."""
        a__ : Tuple = TFFlaubertForSequenceClassification(snake_case )
        a__ : Dict = {"input_ids": input_ids, "lengths": input_lengths}
        a__ : List[Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> str:
        """Check TFFlaubertForTokenClassification logits shape."""
        a__ : List[str] = self.num_labels
        a__ : List[str] = TFFlaubertForTokenClassification(config=snake_case )
        a__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        a__ : List[Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Any:
        """Check TFFlaubertForMultipleChoice logits shape on tiled inputs."""
        a__ : Tuple = self.num_choices
        a__ : Optional[int] = TFFlaubertForMultipleChoice(config=snake_case )
        a__ : str = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
        a__ : str = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
        a__ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
        a__ : int = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        a__ : Union[str, Any] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _snake_case ( self ) -> Tuple:
        """Return (config, inputs_dict) in the form the common tests expect."""
        a__ : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) , (
                a__
            ) ,
        ) : Optional[int] = config_and_inputs
        a__ : int = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    # Common-test harness wiring the Flaubert model tester into the shared
    # TFModelTesterMixin / PipelineTesterMixin machinery.
    # NOTE(review): methods are all named `_snake_case` (each clobbers the
    # previous) and `TFFlaubertModelTester` is undefined here (the tester class
    # above was renamed) — confirm against the upstream test file.
    _UpperCamelCase : Tuple = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    _UpperCamelCase : Union[str, Any] = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    _UpperCamelCase : Dict = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : str = False
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
        """Skip QA pipeline tests when a slow tokenizer would be used."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _snake_case ( self ) -> List[Any]:
        """Instantiate the model tester and config tester."""
        a__ : Union[str, Any] = TFFlaubertModelTester(self )
        a__ : Tuple = ConfigTester(self , config_class=snake_case , emb_dim=37 )
    def _snake_case ( self ) -> Union[str, Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def _snake_case ( self ) -> Any:
        """Shape-check the base model."""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*snake_case )
    def _snake_case ( self ) -> List[str]:
        """Shape-check the LM head model."""
        a__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*snake_case )
    def _snake_case ( self ) -> str:
        """Shape-check the question-answering head."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*snake_case )
    def _snake_case ( self ) -> Optional[int]:
        """Shape-check the sequence-classification head."""
        a__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case )
    def _snake_case ( self ) -> Dict:
        """Shape-check the token-classification head."""
        a__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*snake_case )
    def _snake_case ( self ) -> Optional[Any]:
        """Shape-check the multiple-choice head."""
        a__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*snake_case )
    @slow
    def _snake_case ( self ) -> Any:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : List[Any] = TFFlaubertModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    # Integration test: run a real small Flaubert checkpoint and compare a
    # slice of the output against hard-coded reference values.
    @slow
    def _snake_case ( self ) -> int:
        """Check output shape and a 3x3 slice for 'jplu/tf-flaubert-small-cased'."""
        a__ : Dict = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
        a__ : int = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.intaa , )  # "J'aime flaubert !"
        a__ : Any = model(snake_case )[0]
        a__ : Union[str, Any] = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , snake_case )
        # compare the actual values for a slice.
        a__ : int = tf.convert_to_tensor(
            [
                [
                    [-1.8_768_773, -1.566_555, 0.27_072_418],
                    [-1.6_920_038, -0.5_873_505, 1.9_329_599],
                    [-2.9_563_985, -1.6_993_835, 1.7_972_052],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Map of submodule name -> public symbols, used to build the lazy module below.
SCREAMING_SNAKE_CASE__ : int = {
    """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
    """tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: the modeling symbols are simply not registered.
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[str] = [
        """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CpmAntForCausalLM""",
        """CpmAntModel""",
        """CpmAntPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is undefined in this file — the
    # structure dict above was renamed to SCREAMING_SNAKE_CASE__ — confirm
    # against the upstream transformers `__init__`.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Map of submodule name -> public symbols, used to build the lazy module below.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: the modeling symbols are simply not registered.
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MegaForCausalLM""",
        """MegaForMaskedLM""",
        """MegaForMultipleChoice""",
        """MegaForQuestionAnswering""",
        """MegaForSequenceClassification""",
        """MegaForTokenClassification""",
        """MegaModel""",
        """MegaPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is undefined in this file — the
    # structure dict above was renamed to SCREAMING_SNAKE_CASE__ — confirm
    # against the upstream transformers `__init__`.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Wikipedia-style reference passage used as the shared context for the
# text-question-answering tool tests below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    # Tests for the "text-question-answering" tool, locally and remotely.
    # NOTE(review): every method is named `_snake_case` (each definition
    # clobbers the previous one) and the obfuscated `snake_case` argument stood
    # for the passage constant above — confirm against the upstream test file.
    def _snake_case ( self ) -> str:
        """Load the local and remote variants of the QA tool."""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
    def _snake_case ( self ) -> Dict:
        """Local tool, positional arguments."""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Tuple:
        """Remote tool, positional arguments."""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Any:
        """Local tool, keyword arguments."""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> int:
        """Remote tool, keyword arguments."""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def _A ( iterations , min_value = 0.0 , max_value = 1.0 ):
    """Demo: estimate the area under y=x on [min_value, max_value] and print stats.

    Fix: the original declared three parameters all named ``lowerCamelCase``
    (a SyntaxError) while the body read the undefined names used here.
    """
    def identity_function(x ) -> float:
        return x

    # NOTE(review): `area_under_curve_estimator` is not defined under that name
    # in this file (helpers were renamed to `_A`) — confirm against upstream.
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    # Closed form: integral of x dx = (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def _A ( iterations ):
    """Demo: estimate pi as the area under the quarter circle y = sqrt(4 - x^2)
    on [0, 2] and print stats.

    Fix: the inner function's parameter was named ``lowerCamelCase`` while its
    body read the undefined name ``x``.
    """
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )

    # NOTE(review): `area_under_curve_estimator` is not defined under that name
    # in this file (helpers were renamed to `_A`) — confirm against upstream.
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    # Tests for TRANSFORMERS_OFFLINE: each test composes a small Python
    # program (load/run/mock snippets), runs it in a subprocess with sockets
    # monkey-patched to fail, and checks cached models still load.
    # NOTE(review): every method is named `_snake_case`, so each definition
    # clobbers the previous one, and locals were obfuscated to `a__` so bodies
    # reference undefined names (load, run, mock, env, ...) — confirm against
    # the upstream transformers test file.
    @require_torch
    def _snake_case ( self ) -> str:
        """Cached model + TRANSFORMERS_OFFLINE=1 must work with sockets raising."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Cached model must survive a flaky-internet socket error."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoints must also load offline from the cache."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Pipeline task inference must fail with a clear error when offline."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """trust_remote_code models must load offline once cached."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
# Undirected demo graph as an adjacency list.
# NOTE(review): the `__main__` block below refers to this as `demo_graph`,
# which is undefined after the obfuscated rename — confirm against upstream.
SCREAMING_SNAKE_CASE__ : Dict = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Union[str, Any] = set()
# keep track of all the paths to be checked
a__ : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
a__ : Dict = queue.pop(0 )
# get the last node from the path
a__ : int = path[-1]
if node not in explored:
a__ : Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
a__ : List[Any] = list(lowerCamelCase )
new_path.append(lowerCamelCase )
queue.append(lowerCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCamelCase )
# in case there's no path between the 2 nodes
return []
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
a__ : Optional[Any] = [start]
a__ : Tuple = set(lowerCamelCase )
# Keep tab on distances from `start` node.
a__ : str = {start: 0, target: -1}
while queue:
a__ : Dict = queue.pop(0 )
if node == target:
a__ : Tuple = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCamelCase )
queue.append(lowerCamelCase )
a__ : str = dist[node] + 1
return dist[target]
if __name__ == "__main__":
    # NOTE(review): `demo_graph`, `bfs_shortest_path` and
    # `bfs_shortest_path_distance` are not defined under these names in this
    # file (the graph is bound to SCREAMING_SNAKE_CASE__ and both functions
    # to `_A`); confirm the intended public names before running.
    print(bfs_shortest_path(demo_graph, """G""", """D"""))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, """G""", """D"""))  # returns 4
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the VAN model: the configuration is always
# importable, the modeling objects only when torch is available.
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling symbols as well.
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VanForImageClassification""",
        """VanModel""",
        """VanPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): the canonical pattern assigns the lazy module into
    # sys.modules[__name__] and passes a `_import_structure` dict; here the
    # result is bound to a plain name and `_import_structure` is never
    # defined above — verify against the upstream __init__ template.
    SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for AutoImageProcessor resolution: hub lookup, local config
    files, dynamic (trust_remote_code) processors and custom registration.

    NOTE(review): every test method shares the name ``_snake_case`` (later
    definitions shadow earlier ones at class-creation time), and method
    bodies read names (``tmpdirname`` via ``snake_case``, ``config_dict``,
    ``image_processor``) that are never bound — the block looks
    machine-mangled; compare with the upstream test module.
    """

    def _snake_case ( self ) -> Optional[Any]:
        """Per-test setup (counter initialisation; target name lost)."""
        a__ : Tuple = 0

    def _snake_case ( self ) -> Dict:
        """Resolving a hub repo id yields a CLIP image processor."""
        a__ : Tuple = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
        self.assertIsInstance(snake_case , snake_case )

    def _snake_case ( self ) -> List[str]:
        """Load from a local dir whose preprocessor_config.json names the class."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            a__ : Tuple = Path(snake_case ) / "preprocessor_config.json"
            a__ : Tuple = Path(snake_case ) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case , "w" ) , )
            json.dump({"model_type": "clip"} , open(snake_case , "w" ) )
            a__ : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )

    def _snake_case ( self ) -> str:
        """Legacy feature_extractor_type key must still resolve a processor."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            a__ : str = Path(snake_case ) / "preprocessor_config.json"
            a__ : List[Any] = Path(snake_case ) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case , "w" ) , )
            json.dump({"model_type": "clip"} , open(snake_case , "w" ) )
            a__ : Tuple = AutoImageProcessor.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )

    def _snake_case ( self ) -> str:
        """config.json's model_type alone must be enough to pick the processor."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            a__ : int = CLIPConfig()
            # Create a dummy config file with image_processor_type
            a__ : str = Path(snake_case ) / "preprocessor_config.json"
            a__ : Optional[int] = Path(snake_case ) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case , "w" ) , )
            json.dump({"model_type": "clip"} , open(snake_case , "w" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            a__ : Dict = AutoImageProcessor.from_pretrained(snake_case ).to_dict()
            config_dict.pop("image_processor_type" )
            a__ : int = CLIPImageProcessor(**snake_case )
            # save in new folder
            model_config.save_pretrained(snake_case )
            config.save_pretrained(snake_case )
            a__ : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case )
            # make sure private variable is not incorrectly saved
            a__ : str = json.loads(config.to_json_string() )
            self.assertTrue("_processor_class" not in dict_as_saved )
            self.assertIsInstance(snake_case , snake_case )

    def _snake_case ( self ) -> Dict:
        """A lone preprocessor_config.json (no config.json) must be enough."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            a__ : Optional[int] = Path(snake_case ) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case , "w" ) , )
            a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(snake_case )
            self.assertIsInstance(snake_case , snake_case )

    def _snake_case ( self ) -> Optional[Any]:
        """A non-existent repo id raises a clear error."""
        with self.assertRaisesRegex(
            snake_case , "clip-base is not a local folder and is not a valid model identifier" ):
            a__ : List[str] = AutoImageProcessor.from_pretrained("clip-base" )

    def _snake_case ( self ) -> Any:
        """An invalid revision raises a clear error."""
        with self.assertRaisesRegex(
            snake_case , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(snake_case , revision="aaaaaa" )

    def _snake_case ( self ) -> List[Any]:
        """A repo without preprocessor_config.json raises a clear error."""
        with self.assertRaisesRegex(
            snake_case , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
            a__ : Optional[Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )

    def _snake_case ( self ) -> int:
        """Dynamic processors require trust_remote_code and can be reloaded."""
        with self.assertRaises(snake_case ):
            a__ : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(snake_case ):
            a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case )
        a__ : Dict = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case )
        self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(snake_case )
            a__ : Dict = AutoImageProcessor.from_pretrained(snake_case , trust_remote_code=snake_case )
            self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )

    def _snake_case ( self ) -> Dict:
        """Registering a custom config/processor pair wires it into the auto-API."""
        try:
            AutoConfig.register("custom" , snake_case )
            AutoImageProcessor.register(snake_case , snake_case )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(snake_case ):
                AutoImageProcessor.register(snake_case , snake_case )
            with tempfile.TemporaryDirectory() as tmpdirname:
                a__ : Optional[int] = Path(snake_case ) / "preprocessor_config.json"
                a__ : List[Any] = Path(snake_case ) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case , "w" ) , )
                json.dump({"model_type": "clip"} , open(snake_case , "w" ) )
                a__ : Any = CustomImageProcessor.from_pretrained(snake_case )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(snake_case )
                a__ : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case )
                self.assertIsInstance(snake_case , snake_case )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def _snake_case ( self ) -> int:
        """A registered local class wins over hub code unless remote is forced."""
        class __lowerCAmelCase ( _UpperCamelCase ):
            # Marker attribute distinguishing the local class from the hub one.
            _UpperCamelCase : Optional[int] = True
        try:
            AutoConfig.register("custom" , snake_case )
            AutoImageProcessor.register(snake_case , snake_case )
            # If remote code is not set, the default is to use local
            a__ : List[Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            a__ : List[Any] = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            a__ : Tuple = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(not hasattr(snake_case , "is_local" ) )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 629 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        # NOTE(review): the result is bound to SCREAMING_SNAKE_CASE__ while the
        # next line reads `brigt_img`, and `change_brightness` is not defined in
        # this file (the function above is `_A`) — confirm the intended names.
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
    """Synthetic-config builder used by the Flax ALBERT common tests.

    NOTE(review): the assignments in ``__init__`` bind a throwaway ``a__``
    while the other methods read attributes like ``self.batch_size`` — the
    ``self.`` of each target (and the parameter names, all mangled to
    ``snake_case``) appears lost; compare with the upstream model tester.
    """

    def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=4 , ) -> str:
        """Record the model hyper-parameters used to build test configs."""
        a__ : Any = parent
        a__ : Dict = batch_size
        a__ : Dict = seq_length
        a__ : List[Any] = is_training
        a__ : Any = use_attention_mask
        a__ : Optional[Any] = use_token_type_ids
        a__ : Optional[Any] = use_labels
        a__ : Tuple = vocab_size
        a__ : List[str] = hidden_size
        a__ : Dict = num_hidden_layers
        a__ : Union[str, Any] = num_attention_heads
        a__ : List[str] = intermediate_size
        a__ : Optional[Any] = hidden_act
        a__ : Tuple = hidden_dropout_prob
        a__ : Any = attention_probs_dropout_prob
        a__ : int = max_position_embeddings
        a__ : Union[str, Any] = type_vocab_size
        a__ : Union[str, Any] = type_sequence_label_size
        a__ : List[Any] = initializer_range
        a__ : Union[str, Any] = num_choices

    def _snake_case ( self ) -> List[Any]:
        """Build random input tensors and a matching AlbertConfig."""
        a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : Optional[int] = None
        if self.use_attention_mask:
            a__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        a__ : Union[str, Any] = None
        if self.use_token_type_ids:
            a__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        a__ : List[str] = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _snake_case ( self ) -> str:
        """Repackage prepared config/inputs as the dict the common tests use."""
        a__ : List[Any] = self.prepare_config_and_inputs()
        a__ , a__ , a__ , a__ : Dict = config_and_inputs
        a__ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """Common Flax model tests applied to every ALBERT head class.

    Fixes: the original tuple listed ``FlaxAlbertForQuestionAnswering``
    twice (the duplicate made the common tests run that head twice), and the
    setup method discarded the model tester instead of binding it to
    ``self.model_tester`` as the common test mixin expects.
    """

    # All Flax ALBERT classes exercised by the shared tests; empty when flax
    # is unavailable so collection does not fail.
    _UpperCamelCase : List[str] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def _snake_case ( self ) -> Union[str, Any]:
        """Instantiate the shared model tester used by the mixin's tests."""
        self.model_tester = FlaxAlbertModelTester(self )

    @slow
    def _snake_case ( self ) -> List[str]:
        """Smoke-test that each class loads albert-base-v2 and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test pinning hidden-state values of albert-base-v2."""

    @slow
    def _snake_case ( self ) -> Optional[Any]:
        """Run a fixed 11-token sequence and compare a hidden-state slice to
        reference values (atol 1e-4).

        NOTE(review): the assignments bind ``a__`` while the asserts read
        ``model``/``output`` — the original targets look lost.
        """
        a__ : Dict = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        a__ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        a__ : List[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        a__ : Optional[int] = model(snake_case , attention_mask=snake_case )[0]
        # Expected last-hidden-state shape: (batch, seq_len, hidden_size).
        a__ : List[Any] = (1, 11, 768)
        self.assertEqual(output.shape , snake_case )
        a__ : Any = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) )
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs keyed by model size.  The
# second-to-last path segment of each URL is the file's SHA-256 digest,
# which the download helper below verifies.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
# Substring replacements mapping OpenAI Whisper state-dict key fragments to
# their HF Transformers equivalents; applied by the rename helper below.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}
def _A(s_dict):
    """Rename OpenAI Whisper state-dict keys to HF naming, in place.

    Fixes the mangled original, whose assignments bound ``a__`` while the
    loop read never-defined ``keys``/``new_key``.  Every key containing a
    WHISPER_MAPPING fragment has that fragment replaced and its tensor moved
    to the new key.

    Returns:
        The same (mutated) state dict.
    """
    # Snapshot the keys: the dict is mutated while we iterate.
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(F"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A(url, root):
    """Download ``url`` into directory ``root`` and return the file's bytes.

    Fixes the mangled original: local names were lost, the hash function was
    corrupted to the nonexistent ``hashlib.shaaaa`` (SHA-256 intended, per
    the digest embedded in OpenAI's checkpoint URLs), and read handles were
    leaked.  A cached file with a matching digest is reused; a mismatching
    cache is re-downloaded.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or the digest still mismatches after downloading.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    # OpenAI encodes the expected SHA-256 as the second-to-last URL segment.
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(F"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.")
    return model_bytes
def _A(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to WhisperForConditionalGeneration.

    Fixes the mangled original, whose assignments bound ``a__`` while later
    lines read never-defined names (``original_checkpoint``, ``state_dict``,
    ``config``, ``model`` ...).

    Args:
        checkpoint_path: a size key of ``_MODELS`` (the checkpoint is
            downloaded) or a path to a local ``.pt`` file.
        pytorch_dump_folder_path: directory the converted model is saved to.

    Raises:
        ValueError: if weights other than the positional embeddings are
            missing after loading the state dict.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the output-projection weights before the keys get renamed.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): "n_text_state" reproduced from the original line;
        # "n_text_head" looks intended for an attention-head count — confirm
        # against the checkpoint's dims before relying on this.
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F""" but all the following weights are missing {missing}""")
    if tie_embeds:
        # Tie proj_out to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fixed: the parser and parsed-args results were bound to throwaway names
    # while the following lines read `parser`/`args`, and the conversion
    # entry point is bound to `_A` in this module.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    _A(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( _UpperCamelCase ):
    """Processor pairing a BLIP image processor with a tokenizer behind one
    ``__call__`` that returns a combined BatchEncoding.

    NOTE(review): several assignments bind a throwaway ``a__`` while later
    code reads ``text_encoding``/``encoding_image_processor``, and all
    parameter names are mangled to ``snake_case`` — compare with the
    upstream Blip processor before relying on this block.
    """

    # Attributes consumed by ProcessorMixin to wire up the two sub-processors.
    _UpperCamelCase : List[Any] = ["""image_processor""", """tokenizer"""]
    _UpperCamelCase : List[Any] = """BlipImageProcessor"""
    _UpperCamelCase : Optional[Any] = """AutoTokenizer"""

    def __init__( self , snake_case , snake_case ) -> Union[str, Any]:
        """Register the image processor and tokenizer via ProcessorMixin."""
        a__ : Optional[Any] = False
        super().__init__(snake_case , snake_case )
        a__ : int = self.image_processor

    def __call__( self , snake_case = None , snake_case = None , snake_case = True , snake_case = False , snake_case = None , snake_case = None , snake_case = 0 , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = False , snake_case = False , snake_case = False , snake_case = True , snake_case = None , **snake_case , ) -> BatchEncoding:
        """Tokenize text and/or preprocess images; at least one is required.

        Returns only the text encoding when no images are given, otherwise
        the image encoding with any text fields merged in.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            a__ : Optional[int] = self.tokenizer
            a__ : Optional[int] = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
            return text_encoding
        # add pixel_values
        a__ : Dict = self.image_processor(snake_case , return_tensors=snake_case )
        if text is not None:
            a__ : Tuple = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
        else:
            a__ : Optional[int] = None
        if text_encoding is not None:
            # Merge token fields into the image encoding so one mapping is returned.
            encoding_image_processor.update(snake_case )
        return encoding_image_processor

    def _snake_case ( self , *snake_case , **snake_case ) -> Tuple:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*snake_case , **snake_case )

    def _snake_case ( self , *snake_case , **snake_case ) -> List[Any]:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*snake_case , **snake_case )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def _snake_case ( self ) -> Optional[Any]:
        """Union of tokenizer and image-processor input names (order kept)."""
        a__ : Union[str, Any] = self.tokenizer.model_input_names
        a__ : str = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, following the Transformers logging convention.
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
# Pretrained Informer checkpoints mapped to their hosted config files.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for the Informer time-series transformer.

    NOTE(review): nearly every assignment in ``__init__`` binds a throwaway
    ``a__`` instead of the expected ``self.<attr>``, so attributes read by
    ``_number_of_features`` (e.g. ``self.embedding_dimension``) are never
    set, and the parameter names are mangled to ``snake_case`` — compare
    with the upstream InformerConfig before relying on this block.
    """

    _UpperCamelCase : Optional[Any] = """informer"""
    # Standard attribute names aliased onto Informer-specific config fields.
    _UpperCamelCase : Any = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
        """Store time-series, feature and transformer hyper-parameters."""
        # --- time-series-specific configuration ---
        a__ : Optional[Any] = prediction_length
        a__ : Optional[int] = context_length or prediction_length
        a__ : Optional[int] = distribution_output
        a__ : str = loss
        a__ : Optional[Any] = input_size
        a__ : int = num_time_features
        a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        a__ : Optional[int] = scaling
        a__ : List[str] = num_dynamic_real_features
        a__ : Optional[int] = num_static_real_features
        a__ : Optional[int] = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            a__ : List[Any] = cardinality
        else:
            a__ : Tuple = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(snake_case ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            a__ : Tuple = embedding_dimension
        else:
            # Heuristic default: half of (cardinality + 1), capped at 50.
            a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        a__ : Optional[Any] = num_parallel_samples
        # Transformer architecture configuration
        a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
        a__ : Union[str, Any] = d_model
        a__ : Any = encoder_attention_heads
        a__ : Optional[Any] = decoder_attention_heads
        a__ : int = encoder_ffn_dim
        a__ : List[Any] = decoder_ffn_dim
        a__ : List[str] = encoder_layers
        a__ : Any = decoder_layers
        a__ : List[str] = dropout
        a__ : int = attention_dropout
        a__ : List[Any] = activation_dropout
        a__ : Optional[int] = encoder_layerdrop
        a__ : Tuple = decoder_layerdrop
        a__ : Any = activation_function
        a__ : Tuple = init_std
        a__ : Optional[int] = use_cache
        # Informer
        a__ : Union[str, Any] = attention_type
        a__ : List[str] = sampling_factor
        a__ : Optional[int] = distil
        super().__init__(is_encoder_decoder=snake_case , **snake_case )

    @property
    def _snake_case ( self ) -> int:
        """Number of per-timestep input features fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Optional[Any] = TextToVideoSDPipeline
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_UpperCamelCase : Tuple = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _snake_case ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
a__ : Optional[int] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
a__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
a__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
a__ : Optional[int] = CLIPTextModel(snake_case )
a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
a__ : Dict = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , snake_case , snake_case=0 ) -> Optional[Any]:
"""simple docstring"""
if str(snake_case ).startswith("mps" ):
a__ : Union[str, Any] = torch.manual_seed(snake_case )
else:
a__ : Dict = torch.Generator(device=snake_case ).manual_seed(snake_case )
a__ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _snake_case ( self ) -> Optional[int]:
    """Full CPU forward pass: checks output frame shape and a pixel slice
    against reference values.

    Fix: all locals were obfuscated to ``a__`` while later statements read
    ``sd_pipe``/``inputs``/``frames``/``image_slice``/``expected_slice``.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = TextToVideoSDPipeline(**components )
    sd_pipe = sd_pipe.to(device )
    # NOTE(review): the disable flag was obfuscated; upstream passes None — confirm.
    sd_pipe.set_progress_bar_config(disable=None )
    inputs = self.get_dummy_inputs(device )
    # NOTE(review): original LHS was obfuscated; overriding the output type to
    # numpy matches the assertions below — confirm against the original file.
    inputs["output_type"] = "np"
    frames = sd_pipe(**inputs ).frames
    image_slice = frames[0][-3:, -3:, -1]
    assert frames[0].shape == (64, 64, 3)
    expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Dict:
    """Attention-slicing output must match the unsliced forward pass."""
    # NOTE(review): the boolean was obfuscated (`snake_case`); upstream passes
    # False (video frames, not single images) — confirm.
    self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3E-3 )
@unittest.skipIf(
    torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self ) -> Tuple:
    """xFormers memory-efficient attention must match the default attention."""
    # NOTE(review): the boolean was obfuscated (`snake_case`); upstream passes
    # False here — confirm.
    self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _snake_case ( self ) -> str:
    """Placeholder: batched-inference check intentionally disabled (see skip reason)."""
    pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _snake_case ( self ) -> Tuple:
    """Placeholder: batch-consistency check intentionally disabled (see skip reason)."""
    pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _snake_case ( self ) -> Any:
    """Placeholder: multi-image-per-prompt check does not apply to this pipeline."""
    pass
def _snake_case ( self ) -> str:
    """Re-declare the mixin's progress-bar test so it runs in this suite unchanged."""
    return super().test_progress_bar()
@slow
@skip_mps
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow CUDA integration tests: generated videos vs. reference .npy frames.

    Fixes applied to the obfuscated original:
    * both tests shared one method name, so the second silently replaced the
      first and unittest discovered neither — distinct ``test_*`` names restored;
    * locals were collapsed to ``a__`` while later lines read ``pipe``/
      ``expected_video``/``video_frames``/``video``;
    * the DPMSolver scheduler was built but never attached to the pipeline —
      it is now assigned to ``pipe.scheduler``.
    """

    def test_full_model(self) -> Union[str, Any]:
        """25-step generation with the DPMSolver multistep scheduler."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2

    def test_two_step_model(self) -> List[Any]:
        """2-step generation with the checkpoint's default scheduler."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        pipe = pipe.to("cuda" )
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type="pt" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
# Tiny teacher checkpoints used by the make_student tests.
# NOTE(review): both assignments bind the SAME module-level name, so only the
# T5 id survives at runtime; the originals presumably had distinct names
# (e.g. a BART id and a T5 id) before obfuscation — confirm and restore.
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``.

    Fixes applied to the obfuscated original:
    * every method shared one name (``_snake_case``), so later defs replaced
      earlier ones, unittest discovered nothing, and ``self.teacher_config``
      did not exist — distinct names restored;
    * unpack targets collided (``a__, *a__``) while assertions read ``student``;
    * call-site arguments were obfuscated (``snake_case``); the surviving
      module constant is used as the teacher id and ``d=None``/``e=None``
      values follow the upstream transformers test — confirm.
    """

    @cached_property
    def teacher_config(self):
        """Config of the teacher model whose layers are copied."""
        return AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )

    def test_valid_student(self) -> Any:
        """A 1-encoder/1-decoder student is created with one hidden layer."""
        student, *_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def test_asymmetric_student(self) -> str:
        """Creating a student with the full decoder depth must not raise."""
        student, *_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE__ , tempfile.mkdtemp() , e=1 , d=None )

    def test_same_decoder_small_encoder(self) -> List[str]:
        """With d=None the student keeps the teacher's full decoder depth."""
        student, *_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE__ , tempfile.mkdtemp() , e=1 , d=None )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def test_small_enc_small_dec(self) -> Optional[int]:
        """Explicit e=1, d=1 shrinks both stacks to one layer."""
        student, *_ = create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def test_raises_assert(self) -> int:
        """Omitting both e and d is rejected."""
        with self.assertRaises(AssertionError ):
            create_student_by_copying_alternating_layers(SCREAMING_SNAKE_CASE__ , tempfile.mkdtemp() , e=None , d=None )
| 629 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """BertJapaneseTokenizer tests with word-level tokenizers (MeCab, Sudachi,
    Juman++) and a wordpiece subword tokenizer over a tiny Japanese vocab.

    NOTE(review): this class is machine-obfuscated — every method carries the
    same name (``_snake_case``), so at class-creation time later definitions
    replace earlier ones and unittest discovers none of them; locals are bound
    to ``a__`` while later statements read the original names (``vocab_tokens``,
    ``tokenizer``, ...), which raises NameError at runtime.  The code is kept
    byte-identical here; restore distinct names before relying on this suite.
    """

    # Tokenizer class under test and mixin configuration flags.
    _UpperCamelCase : List[str] = BertJapaneseTokenizer
    _UpperCamelCase : int = False
    _UpperCamelCase : Any = True

    def _snake_case ( self ) -> Tuple:
        """Write a small Japanese wordpiece vocabulary file into the temp dir."""
        super().setUp()
        a__ : List[Any] = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        a__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _snake_case ( self , snake_case ) -> Dict:
        """Return a (raw input, expected detokenized output) Japanese text pair."""
        a__ : str = "こんにちは、世界。 \nこんばんは、世界。"
        a__ : Union[str, Any] = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def _snake_case ( self , snake_case ) -> Tuple:
        """Encode then decode the fixture pair with the given tokenizer."""
        a__ , a__ : List[Any] = self.get_input_output_texts(snake_case )
        a__ : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
        a__ : List[Any] = tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case )
        return text, ids

    def _snake_case ( self ) -> Any:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> Optional[Any]:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> Dict:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> Any:
        """Default tokenizer splits words then wordpieces, with correct ids."""
        a__ : str = self.tokenizer_class(self.vocab_file )
        a__ : Union[str, Any] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )

    def _snake_case ( self ) -> Any:
        """MeCab word tokenizer: tokenization and pickle round-trip."""
        a__ : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(snake_case )
        a__ : Any = "こんにちは、世界。\nこんばんは、世界。"
        a__ : List[str] = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        a__ : Union[str, Any] = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(snake_case , "wb" ) as handle:
            pickle.dump(snake_case , snake_case )
        with open(snake_case , "rb" ) as handle:
            a__ : Dict = pickle.load(snake_case )
        a__ : Optional[Any] = tokenizer_new.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )

    def _snake_case ( self ) -> int:
        """MeCab with the ipadic dictionary."""
        a__ : Any = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self ) -> Optional[Any]:
        """MeCab with unidic_lite (skipped when the dictionary is absent)."""
        try:
            a__ : str = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self ) -> Tuple:
        """MeCab with unidic (skipped when the dictionary is absent)."""
        try:
            a__ : Optional[Any] = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self ) -> Optional[Any]:
        """MeCab with lowercasing enabled (note "iphone")."""
        a__ : Union[str, Any] = MecabTokenizer(do_lower_case=snake_case , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def _snake_case ( self ) -> List[Any]:
        """MeCab with an explicit jumandic option (skipped if the dict is missing)."""
        try:
            a__ : Any = MecabTokenizer(
                do_lower_case=snake_case , normalize_text=snake_case , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def _snake_case ( self ) -> List[str]:
        """MeCab with text normalization disabled (ideographic space preserved)."""
        a__ : List[str] = MecabTokenizer(normalize_text=snake_case , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def _snake_case ( self ) -> Any:
        """Sudachi word tokenizer: tokenization and pickle round-trip."""
        a__ : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(snake_case )
        a__ : int = "こんにちは、世界。\nこんばんは、世界。"
        a__ : List[Any] = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        a__ : int = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(snake_case , "wb" ) as handle:
            pickle.dump(snake_case , snake_case )
        with open(snake_case , "rb" ) as handle:
            a__ : str = pickle.load(snake_case )
        a__ : Tuple = tokenizer_new.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )

    @require_sudachi
    def _snake_case ( self ) -> int:
        """Sudachi with the core dictionary keeps whitespace tokens."""
        a__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self ) -> str:
        """Sudachi split mode A: shortest units."""
        a__ : Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def _snake_case ( self ) -> Optional[Any]:
        """Sudachi split mode B: middle units."""
        a__ : Optional[int] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def _snake_case ( self ) -> Dict:
        """Sudachi split mode C: longest units."""
        a__ : Optional[int] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def _snake_case ( self ) -> int:
        """Sudachi with lowercasing enabled (note "iphone")."""
        a__ : List[str] = SudachiTokenizer(do_lower_case=snake_case , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self ) -> Optional[int]:
        """Sudachi with text normalization disabled (ideographic space preserved)."""
        a__ : Optional[int] = SudachiTokenizer(normalize_text=snake_case , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def _snake_case ( self ) -> Optional[int]:
        """Sudachi with whitespace trimming enabled (no whitespace tokens)."""
        a__ : str = SudachiTokenizer(trim_whitespace=snake_case , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def _snake_case ( self ) -> int:
        """Juman++ word tokenizer: tokenization and pickle round-trip."""
        a__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(snake_case )
        a__ : Dict = "こんにちは、世界。\nこんばんは、世界。"
        a__ : Optional[Any] = tokenizer.tokenize(snake_case )
        self.assertListEqual(snake_case , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
        a__ : Tuple = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(snake_case , "wb" ) as handle:
            pickle.dump(snake_case , snake_case )
        with open(snake_case , "rb" ) as handle:
            a__ : Any = pickle.load(snake_case )
        a__ : Tuple = tokenizer_new.tokenize(snake_case )
        self.assertListEqual(snake_case , snake_case )

    @require_jumanpp
    def _snake_case ( self ) -> str:
        """Juman++ default tokenization (keeps ideographic-space tokens)."""
        a__ : str = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self ) -> List[Any]:
        """Juman++ with lowercasing enabled (note "iphone")."""
        a__ : Optional[Any] = JumanppTokenizer(do_lower_case=snake_case )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self ) -> Any:
        """Juman++ with normalization disabled (half-width katakana split up)."""
        a__ : List[Any] = JumanppTokenizer(normalize_text=snake_case )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def _snake_case ( self ) -> List[Any]:
        """Juman++ with whitespace trimming enabled."""
        a__ : str = JumanppTokenizer(trim_whitespace=snake_case )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def _snake_case ( self ) -> Optional[Any]:
        """Juman++ keeps emoticon-like character runs as one token."""
        a__ : List[Any] = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def _snake_case ( self ) -> Tuple:
        """WordpieceTokenizer over an inline vocab: empty, known, and UNK cases."""
        a__ : str = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        a__ : Dict = {}
        for i, token in enumerate(snake_case ):
            a__ : Any = i
        a__ : Dict = WordpieceTokenizer(vocab=snake_case , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def _snake_case ( self ) -> int:
        """Sentencepiece subword tokenizer loaded from a pretrained checkpoint."""
        a__ : List[str] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        a__ : int = tokenizer.subword_tokenizer
        a__ : List[str] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(snake_case , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        a__ : Optional[int] = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(snake_case , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def _snake_case ( self ) -> Union[str, Any]:
        """Special tokens: [CLS]=2 and [SEP]=3 wrap single and paired inputs."""
        a__ : Optional[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        a__ : Union[str, Any] = tokenizer.encode("ありがとう。" , add_special_tokens=snake_case )
        a__ : List[str] = tokenizer.encode("どういたしまして。" , add_special_tokens=snake_case )
        a__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case )
        a__ : Tuple = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """BertJapaneseTokenizer tests with the character-level subword tokenizer.

    NOTE(review): same obfuscation caveats as the class above — duplicate
    method names and ``a__`` locals read under their original names; code is
    kept byte-identical here.
    """

    # Tokenizer class under test; slow-only (no fast tokenizer).
    _UpperCamelCase : int = BertJapaneseTokenizer
    _UpperCamelCase : Optional[int] = False

    def _snake_case ( self ) -> List[str]:
        """Write a small single-character vocabulary file into the temp dir."""
        super().setUp()
        a__ : Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        a__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _snake_case ( self , **snake_case ) -> Union[str, Any]:
        """Load a character-mode tokenizer from the temp vocab."""
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **snake_case )

    def _snake_case ( self , snake_case ) -> Dict:
        """Return a (raw input, expected per-character output) text pair."""
        a__ : Dict = "こんにちは、世界。 \nこんばんは、世界。"
        a__ : List[Any] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def _snake_case ( self ) -> Tuple:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> Tuple:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> List[str]:
        """Intentionally empty mixin override."""
        pass  # TODO add if relevant

    def _snake_case ( self ) -> Any:
        """Character mode splits text into single characters with correct ids."""
        a__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        a__ : Any = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            snake_case , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )

    def _snake_case ( self ) -> Any:
        """CharacterTokenizer over an inline vocab: empty, known, and UNK cases."""
        a__ : str = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        a__ : Any = {}
        for i, token in enumerate(snake_case ):
            a__ : Tuple = i
        a__ : Union[str, Any] = CharacterTokenizer(vocab=snake_case , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def _snake_case ( self ) -> Optional[int]:
        """Special tokens: [CLS]=2 and [SEP]=3 wrap single and paired inputs."""
        a__ : Optional[Any] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        a__ : List[str] = tokenizer.encode("ありがとう。" , add_special_tokens=snake_case )
        a__ : List[str] = tokenizer.encode("どういたしまして。" , add_special_tokens=snake_case )
        a__ : Dict = tokenizer.build_inputs_with_special_tokens(snake_case )
        a__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    """AutoTokenizer must resolve the Japanese BERT checkpoint to BertJapaneseTokenizer.

    Fix: the checkpoint id and tokenizer were both bound to ``a__`` while the
    calls referenced the original names; consistent locals restored.  The
    expected class in the instance check was obfuscated — BertJapaneseTokenizer
    matches the surrounding suite; confirm against the original file.
    """

    def _snake_case ( self ) -> Dict:
        model_name = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(model_name )
        self.assertIsInstance(tokenizer , BertJapaneseTokenizer )
class __lowerCAmelCase ( unittest.TestCase ):
    """Loading a checkpoint with a mismatched tokenizer class must log a warning.

    Fix: locals were bound to ``a__`` while the ``from_pretrained`` calls read
    the original names; consistent locals restored.
    """

    def _snake_case ( self ) -> int:
        # Japanese checkpoint loaded with the plain BertTokenizer -> warning.
        model_name = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(model_name )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
        # English checkpoint loaded with BertJapaneseTokenizer -> warning.
        model_name = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(model_name )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
# Checkpoint ids for the four Stable Diffusion v1.x variants compared below.
# NOTE(review): all four assignments rebind the SAME module-level name, so only
# the v1-4 id survives at runtime; the originals presumably had distinct names
# (one per sub-pipeline) before obfuscation — confirm and restore.
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    """Run the same prompt through Stable Diffusion v1.1–v1.4 and return the
    four first images as one StableDiffusionPipelineOutput.

    Fixes applied to the obfuscated original:
    * ``super()._init_()`` called a non-existent method — now ``super().__init__()``;
    * all eight ``__init__`` parameters shared one name (a SyntaxError) — the
      standard StableDiffusionPipeline component names are restored;
    * the four sub-pipelines were each assigned to ``a__`` — stored as
      ``self.pipe1``..``self.pipe4`` (matching the obfuscated ``pipea`` reads);
    * the four per-checkpoint wrappers all collapsed to one method name, so
      only the last survived — restored as ``text2img_sd1_1``..``text2img_sd1_4``
      (the obfuscated call sites read ``textaimg_sda_a``), and the final entry
      point is ``__call__``.
    Checkpoint ids are inlined because the module constants above all rebind
    one name.
    """

    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ) -> Any:
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1" )
        self.pipe2 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-2" )
        self.pipe3 = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-3" )
        # v1.4 is assembled from the components handed to this constructor.
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )

    @property
    def components(self) -> Dict[str, Any]:
        """All registered (non-private) config entries, keyed by name."""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}

    def enable_attention_slicing(self , slice_size = "auto" ) -> Optional[Any]:
        """Enable sliced attention computation on the shared UNet."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self) -> Tuple:
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None )

    @torch.no_grad()
    def text2img_sd1_1(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Optional[int]:
        """Generate with the v1.1 checkpoint."""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_2(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """Generate with the v1.2 checkpoint."""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_3(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Union[str, Any]:
        """Generate with the v1.3 checkpoint."""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_4(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """Generate with the v1.4 checkpoint."""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def __call__(self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """Generate one image per checkpoint and bundle the four results."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 629 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
# Route all log output (DEBUG and up) to stdout so the test runner captures it.
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ : Tuple = logging.getLogger()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.StreamHandler(sys.stdout)
# NOTE(review): the handler/logger names were obfuscated to one identifier,
# so `logger`/`stream_handler` below are unbound — restore distinct names.
logger.addHandler(stream_handler)
class __lowerCAmelCase ( _UpperCamelCase ):
    """End-to-end smoke tests for the RAG ``finetune_rag.py`` example script.

    Fixes applied to the obfuscated original:
    * every method shared one name (``_snake_case``), so only the last def
      survived and ``self._create_dummy_data``/``self._run_finetune`` (read at
      the call sites) did not exist — the names the call sites use are
      restored, and the four tests get distinct ``test_*`` names;
    * locals were collapsed to ``a__`` while later lines read ``testargs``/
      ``result``/etc.;
    * the ``exist_ok`` flag was obfuscated — True matches the temp-dir reuse
      pattern here (confirm).
    """

    def _create_dummy_data(self , data_dir ) -> Any:
        """Write tiny train/val/test source+target files under ``data_dir``."""
        os.makedirs(data_dir , exist_ok=True )
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"""{split}.{field}""" ) , "w" ) as f:
                    f.write(content )

    def _run_finetune(self , gpus , distributed_retriever = "pytorch" ) -> str:
        """Run one short finetuning job in a subprocess and return its metrics dict."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , "output" )
        data_dir = os.path.join(tmp_dir , "data" )
        self._create_dummy_data(data_dir=data_dir )
        testargs = F"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()
        if gpus > 0:
            testargs.append(F"""--gpus={gpus}""" )
            if is_apex_available():
                testargs.append("--fp16" )
        else:
            testargs.append("--gpus=0" )
            testargs.append("--distributed_backend=ddp_cpu" )
            testargs.append("--num_processes=2" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , "metrics.json" )
        with open(metrics_save_path ) as f:
            result = json.load(f )
        return result

    @require_torch_gpu
    def test_finetune_gpu(self) -> List[Any]:
        result = self._run_finetune(gpus=1 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    def test_finetune_multigpu(self) -> Dict:
        result = self._run_finetune(gpus=2 )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self) -> Any:
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self) -> List[Any]:
        # NOTE(review): gpus=1 under a multi-gpu requirement looks odd but
        # matches the source as given — confirm against the original file.
        result = self._run_finetune(gpus=1 , distributed_retriever="ray" )
        self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 1 |
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_logger(__name__)
class __lowerCAmelCase ( enum.Enum ):
_UpperCamelCase : List[Any] = """all_checks"""
_UpperCamelCase : Tuple = """basic_checks"""
_UpperCamelCase : int = """no_checks"""
# NOTE(review): these four exception classes were name-mangled to the same
# identifier and their base class is unresolved here; presumably the checksum
# verification error hierarchy (expected-more-files / unexpected-file /
# non-matching-checksum) — confirm original class names before use.
class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass
def _A ( expected_checksums , recorded_checksums , verification_name=None ):
    """Check recorded download checksums against the expected ones.

    Args:
        expected_checksums: mapping url -> expected checksum, or None to skip.
        recorded_checksums: mapping url -> checksum actually observed.
        verification_name: optional label included in error/log messages.

    Raises:
        ExpectedMoreDownloadedFiles: expected URLs missing from the record.
        UnexpectedDownloadedFile: recorded URLs that were not expected.
        NonMatchingChecksumError: any checksum differs.
    """
    # Fix: parameters were mangled to duplicate names; the body already used
    # these real names (`bad_urls` / `for_verification_name` were also unbound).
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
# NOTE(review): these four exception classes were name-mangled to the same
# identifier and their base class is unresolved here; presumably the splits
# verification error hierarchy (expected-more-splits / unexpected-splits /
# non-matching-splits-sizes) — confirm original class names before use.
class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass


class __lowerCAmelCase ( _UpperCamelCase ):
    pass
def _A ( expected_splits , recorded_splits ):
    """Check recorded dataset splits (names and sizes) against the expected ones.

    Raises:
        ExpectedMoreSplits / UnexpectedSplits: split-name sets differ.
        NonMatchingSplitsSizesError: a split's ``num_examples`` differs.
    """
    # Fix: parameters were mangled to duplicate names; the body already used
    # these real names (`bad_splits` was also unbound before its use).
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def _A ( lowerCamelCase , lowerCamelCase = True ):
if record_checksum:
a__ : Tuple = shaaaa()
with open(lowerCamelCase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b"" ):
m.update(lowerCamelCase )
a__ : Optional[Any] = m.hexdigest()
else:
a__ : Optional[int] = None
return {"num_bytes": os.path.getsize(lowerCamelCase ), "checksum": checksum}
def _A ( dataset_size ):
    """Return True iff ``dataset_size`` is truthy and below config.IN_MEMORY_MAX_SIZE.

    Falls back to False when either value is unset/zero.
    """
    # Fix: the parameter was mangled while the body read `dataset_size`.
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( root , value ):
    """Insert ``value``: split at ``value``, then merge left + new node + right."""
    # Fix: lost local bindings. NOTE(review): `Node`, `split` and `merge` are
    # the treap helpers defined alongside this function — confirm their names.
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def _A ( root , value ):
    """Erase every node equal to ``value`` by splitting it out and re-merging."""
    # Fix: lost unpack targets. NOTE(review): `split`/`merge` are the treap
    # helpers defined alongside this function — confirm their names.
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A ( ):
    """Interactive REPL: read commands from stdin, mutate the treap, until 'q'."""
    # Fix: `root`/`args` bindings were lost to a throwaway local.
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. " )
    args = input()
    while args != "q":
        # NOTE(review): `interact_treap` is the command interpreter defined
        # alongside this function — confirm its name.
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("good by!" )
if __name__ == "__main__":
    import doctest

    # Run the module's doctests, then start the interactive REPL.
    doctest.testmod()
    # NOTE(review): `main` is not bound under that name in this module as
    # rendered — confirm the intended entry point.
    main()
| 629 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[str] = TypeVar("""_T""")
class __lowerCAmelCase ( Generic[_T] ):
def __init__( self , snake_case = None ) -> None:
"""simple docstring"""
a__ : list[_T] = list(iterable or [] )
a__ : list[_T] = []
def __len__( self ) -> int:
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self ) -> str:
"""simple docstring"""
return F"""Queue({tuple(self._stacka[::-1] + self._stacka )})"""
def _snake_case ( self , snake_case ) -> None:
"""simple docstring"""
self._stacka.append(snake_case )
def _snake_case ( self ) -> _T:
"""simple docstring"""
a__ : Union[str, Any] = self._stacka.pop
a__ : int = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
    from doctest import testmod

    # Run the module's doctests when executed as a script.
    testmod()
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    """Fast (CPU-sized) tests for ``StableUnCLIPPipeline``.

    NOTE(review): this block was machine-mangled. The five class attributes
    below all carry the same name, so only the last binding survives —
    presumably `pipeline_class`, `params`, `batch_params`, `image_params`,
    `image_latents_params` and `test_xformers_attention` in the original.
    Likewise, the `snake_case` / `embedder_hidden_size` /
    `embedder_projection_dim` references inside the methods are unresolved
    here. Confirm against the upstream test module before relying on this.
    """

    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False

    def _snake_case ( self ) -> List[str]:
        """Build the dummy prior + denoising components for the pipeline.

        NOTE(review): several argument values were mangled to `snake_case`
        and are unresolved in this rendering.
        """
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        # NOTE(review): each component above was bound to a throwaway local;
        # the names used in this dict are unresolved in this rendering.
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic dummy call kwargs (device, seed) for the pipeline."""
        if str(snake_case ).startswith("mps" ):
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def _snake_case ( self ) -> List[str]:
        """Attention-slicing pass: only compare exact outputs on CPU."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )

    def _snake_case ( self ) -> int:
        """Batch-of-one equivalence: only compare exact outputs on cpu/mps."""
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow, GPU-only integration tests for the stable-unclip pipeline.

    NOTE(review): method names were mangled to `_snake_case` (the first one is
    presumably `tearDown`), and several locals/arguments below reference
    unresolved names — confirm against the upstream test module.
    """

    def _snake_case ( self ) -> List[str]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self ) -> Tuple:
        """Compare pipeline output against a stored reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        # NOTE(review): the two arguments here were mangled; presumably the
        # generated image and the loaded reference image.
        assert_mean_pixel_difference(snake_case , snake_case )

    def _snake_case ( self ) -> Tuple:
        """Check peak GPU memory stays under 7 GB with memory savings enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def _A ( vectors , noofclusters ):
    """Run K-Means (Lloyd's algorithm) on ``vectors`` with TensorFlow 1.x ops.

    Args:
        vectors: sequence of equal-length numeric vectors.
        noofclusters: number of clusters; must be < len(vectors).

    Returns:
        (centroids, assignments) as evaluated numpy values.

    Fix: every local binding had been mangled to a throwaway name while later
    lines used the real names; the real names were restored from those uses.
    """
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" , [dim] )
        vb = tf.placeholder("float" , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-populated map from submodule name to the public names it defines.
# Fix: every optional-backend branch had been mangled to rebind one global
# instead of adding a key here, and the final _LazyModule call referenced the
# (then-undefined) `_import_structure`.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access instead.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import os
def _A ( ):
with open(os.path.dirname(lowerCamelCase ) + "/grid.txt" ) as f:
a__ : Optional[Any] = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowerCamelCase ) for x in f.readline().split()] )
a__ : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
a__ : Tuple = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
a__ : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
a__ : Optional[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
a__ : Any = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
a__ : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
a__ : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
a__ : int = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
a__ : Any = temp
return maximum
if __name__ == "__main__":
print(solution())
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def _A ( iterations , min_value = 0.0 , max_value = 1.0 ):
    """Demo: estimate the area under y=x and print it against the exact value."""
    # Fix: duplicate parameter names and lost local bindings restored.
    def identity_function(x ) -> float:
        return x

    # NOTE(review): `area_under_curve_estimator` is the Monte Carlo estimator
    # defined alongside this function — confirm its name.
    estimated_value = area_under_curve_estimator(
        identity_function , iterations , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def _A ( iterations ):
    """Demo: estimate pi as the area under sqrt(4 - x^2) on [0, 2] and print it."""
    # Fix: lost local bindings restored.
    def function_to_integrate(x ) -> float:
        return sqrt(4.0 - x * x )

    # NOTE(review): `area_under_curve_estimator` is the Monte Carlo estimator
    # defined alongside this function — confirm its name.
    estimated_value = area_under_curve_estimator(
        function_to_integrate , iterations , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 629 | 1 |
from __future__ import annotations
import math
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Tuple = u
for i in range(1 , lowerCamelCase ):
a__ : Dict = temp * (u - i)
return temp
def _A ( ):
    """Interactively read sample points from stdin and interpolate a value via
    Newton's forward-difference formula."""
    # Fix: most local bindings were mangled to throwaway names; restored from
    # their later uses.
    n = int(input("enter the numbers of values: " ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    # Initialise the n x n forward-difference table to zero.
    for i in range(n ):
        for j in range(n ):
            y[i].append(0 )
    print("enter the values of parameters in a list: " )
    x = list(map(int , input().split() ) )
    print("enter the values of corresponding parameters: " )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("enter the value to interpolate: " ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        # NOTE(review): `ucal` is the coefficient helper defined alongside
        # this function — confirm its name.
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(F"""the value at {value} is {summ}""" )
if __name__ == "__main__":
    # NOTE(review): `main` is not bound under that name in this module as
    # rendered — confirm the intended entry point.
    main()
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = old_name
if "patch_embed" in old_name:
a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
if layer == "0":
a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
a__ : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
a__ : List[str] = old_name.replace("3" , "convolution2" )
else:
a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
a__ : List[str] = r"\b\d{2}\b"
if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
else:
a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
if int(match[0] ) < 6:
a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a__ : List[Any] = "intermediate_stages." + trimmed_name
else:
a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
a__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
a__ : str = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a__ : str = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
a__ : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
a__ : Optional[int] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
a__ : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
a__ : Optional[int] = "efficientformer." + new_name
else:
a__ : List[Any] = "efficientformer.encoder." + new_name
return new_name
def _A ( checkpoint , num_meta4D_last_stage ):
    """Rename every key of a timm EfficientFormer state dict in place and
    return the same dict."""
    # Fix: the renamed-key write-back was mangled away; restored.
    # NOTE(review): `rename_key` is the key-mapping helper defined alongside
    # this function — confirm its name.
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint
def _A ( ):
    """Download the standard COCO cats test image used by conversion scripts."""
    # Fix: `url`/`image` bindings were lost and `stream=` referenced an
    # undefined name; streaming must be enabled to read `.raw`.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    """Convert an original EfficientFormer checkpoint to the HF format,
    sanity-check its logits on the COCO cats image, save model + processor,
    and optionally push both to the Hub.

    NOTE(review): the four parameters were all obfuscated to the same name
    ``lowerCamelCase`` — duplicate argument names are a SyntaxError, so this
    function cannot run as written.  Upstream they are ``checkpoint_path``,
    ``efficientformer_config_file``, ``pytorch_dump_path`` and ``push_to_hub``
    (see the keyword call at the bottom of the file).  Many reads below
    (``checkpoint_path``, ``config``, ``model``, ``model_name`` …) likewise
    use pre-obfuscation local names that are never bound in this copy.
    """
    # Load the raw torch state dict on CPU so no GPU is required.
    a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"]
    a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase )
    a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase )
    # Model name = checkpoint file name minus extension and trailing tag.
    a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    # NOTE(review): ``num_metaad_blocks`` looks like a mangled
    # ``num_meta3d_blocks`` — confirm against EfficientFormerConfig.
    a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
    a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase )
    model.load_state_dict(lowerCamelCase )
    model.eval()
    a__ : Dict = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    a__ : str = prepare_img()
    a__ : Dict = 256
    a__ : Union[str, Any] = 224
    a__ : List[str] = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
    # original processing pipeline
    a__ : List[str] = Compose(
        [
            Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(lowerCamelCase ),
            ToTensor(),
            Normalize(lowerCamelCase , lowerCamelCase ),
        ] )
    a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 )
    # The HF image processor must reproduce the torchvision pipeline exactly.
    assert torch.allclose(lowerCamelCase , lowerCamelCase )
    a__ : Optional[int] = model(lowerCamelCase )
    a__ : Any = outputs.logits
    a__ : Optional[Any] = (1, 1000)
    # Reference logit slices per released variant (first 10 ImageNet classes).
    if "l1" in model_name:
        a__ : Tuple = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        a__ : int = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        a__ : Optional[Any] = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        # NOTE(review): unlike the l1/l3 branches there is no ``torch.allclose``
        # comparison here, so the l7 reference logits are built but never used.
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
    model.save_pretrained(lowerCamelCase )
    print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(lowerCamelCase )
    print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print("Pushing model to the hub..." )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
    # CLI entry point: convert one EfficientFormer checkpoint.
    # Fix: the parser and parsed-args objects were assigned to the throw-away
    # name ``SCREAMING_SNAKE_CASE__`` while every use below reads ``parser`` /
    # ``args``, which raised NameError before any argument was processed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to EfficientFormer pytorch checkpoint.""",
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for EfficientFormer model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    parser.add_argument(
        """--no-push_to_hub""",
        dest="""push_to_hub""",
        action="""store_false""",
        help="""Do not push model and image processor to the hub""",
    )
    # Pushing defaults to ON; ``--no-push_to_hub`` is the opt-out (this also
    # makes the bare ``--push_to_hub`` flag a no-op, kept for symmetry).
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    # NOTE(review): ``convert_efficientformer_checkpoint`` is not defined under
    # this name in this copy (the conversion function above was renamed
    # ``_A``); restore the name or this call raises NameError.
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizes: full speed-test row count and the smaller run used for the
# formatted reads (upstream names: SPEED_TEST_N_EXAMPLES / SMALL_TEST).
# Fix: the bogus annotations were dropped (`: str` on ints, and a
# ``Union[...]`` annotation that raised NameError because typing is not
# imported in this module).
SCREAMING_SNAKE_CASE__ = 5_0_0_0_0
SCREAMING_SNAKE_CASE__ = 5_0_0_0
# Fix: this was ``SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = os.path.split(__file__)``
# — annotating a tuple target is a SyntaxError, and the two halves were never
# bound to the names the next line reads.
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
# Timings are dumped next to this script as results/<script>.json.
SCREAMING_SNAKE_CASE__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def _A ( dataset , length ):
    """Time ``length`` sequential single-row reads via ``dataset[i]``.

    ``get_duration`` turns the return value into the elapsed wall-clock time.

    Fix: both parameters were obfuscated to one name ``lowerCamelCase``
    (duplicate arguments are a SyntaxError); the body already referenced
    ``dataset``, so the upstream names are restored — they must stay
    ``dataset``/``length`` because the driver calls ``func(dataset, length=...)``.
    """
    for i in range(length ):
        _ = dataset[i]  # value discarded — only access latency is measured
@get_duration
def _A ( dataset , length , batch_size ):
    """Time slicing the whole dataset in contiguous ``batch_size`` chunks.

    ``length`` is unused here (the loop covers ``len(dataset)``) but is kept
    because the driver always passes ``length=`` as a keyword.

    Fix: all three parameters shared one obfuscated name (a SyntaxError); the
    body already read ``dataset`` and ``batch_size``, so those upstream names
    are restored.
    """
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def _A ( dataset , length , type ):
    """Time ``length`` single-row reads with the dataset cast to ``type``.

    ``type`` deliberately shadows the builtin: the driver passes it as the
    ``"type"`` keyword, so the parameter must carry exactly that name.

    Fix: duplicate obfuscated parameter names (a SyntaxError) restored from
    the body references and the driver's kwargs dicts.
    """
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def _A ( dataset , length , batch_size , type ):
    """Time batched slicing with the dataset cast to ``type`` format.

    Fix: all four parameters shared one obfuscated name (a SyntaxError); the
    body already read ``dataset``/``batch_size``, and the driver passes
    ``length``/``batch_size``/``type`` as keywords, fixing the names.
    """
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def _A ( ):
    """Drive the read benchmarks over a synthetic arrow dataset and dump timings.

    Builds a two-column dataset (a float32 sequence column and a float32
    scalar column) in a temporary directory, times each (function, kwargs)
    pair once on the contiguous dataset and once after ``shuffle()``, and
    serializes the accumulated timing dict as JSON.

    NOTE(review): this copy is obfuscation-damaged — the benchmark callables
    (``read`` …), the constants ``SPEED_TEST_N_EXAMPLES``/``SMALL_TEST`` and
    the locals ``functions``/``functions_shuffled``/``dataset`` that the loops
    read were all renamed or collapsed into ``a__``, so the function raises
    NameError as written.
    """
    a__ : Dict = {"num examples": SPEED_TEST_N_EXAMPLES}
    # Benchmarks run on the freshly generated (contiguous) dataset.
    a__ : Union[str, Any] = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    # Smaller matrix for the shuffled pass (indirection makes reads costlier).
    a__ : Optional[Any] = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset" )
        a__ : Dict = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
        a__ : str = generate_example_dataset(
            os.path.join(lowerCamelCase , "dataset.arrow" ) , lowerCamelCase , num_examples=lowerCamelCase , seq_shapes={"list": (100,)} , )
        print("first set of iterations" )
        for func, kwargs in functions:
            print(func.__name__ , str(lowerCamelCase ) )
            a__ : Dict = func(lowerCamelCase , **lowerCamelCase )
        print("shuffling dataset" )
        a__ : List[Any] = dataset.shuffle()
        print("Second set of iterations (after shuffling" )
        for func, kwargs in functions_shuffled:
            print("shuffled " , func.__name__ , str(lowerCamelCase ) )
            a__ : Tuple = func(
                lowerCamelCase , **lowerCamelCase )
        # Timings are written as UTF-8 JSON bytes to the results path.
        with open(lowerCamelCase , "wb" ) as f:
            f.write(json.dumps(lowerCamelCase ).encode("utf-8" ) )
if __name__ == "__main__":  # useful to run the profiler
    # NOTE(review): ``benchmark_iterating`` is the upstream name of the driver
    # defined above (renamed ``_A`` in this copy), so this call raises
    # NameError as written.
    benchmark_iterating()
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# Resource file names the tokenizer expects on disk / on the Hub.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Hub download locations of the vocab and serialized tokenizer per checkpoint.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length the pretrained checkpoint supports.
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
# Per-checkpoint constructor defaults.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
# NOTE(review): all four tables are bound to the same obfuscated name, so only
# the last survives; the class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, etc., which this copy never defines.
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (Rust-backed) LXMERT tokenizer, behaviourally identical to
    BertTokenizerFast and configured for the LXMERT checkpoints.

    Fix(review): in the original copy every ``__init__``/method parameter was
    obfuscated to one repeated name (``snake_case`` — duplicate arguments are
    a SyntaxError), the five class attributes shared the name
    ``_UpperCamelCase`` (so only the last survived) and all three methods
    shared the name ``_snake_case``.  Names below are restored from the
    upstream ``LxmertTokenizerFast`` implementation, which the surviving body
    text (``do_lower_case``, ``strip_accents`` …) still references.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when the requested casing / accent /
        # Chinese-character options differ from the serialized tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """[CLS] A [SEP] — and, for pairs, [CLS] A [SEP] B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 over the first sequence (incl. CLS/SEP), 1 over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the Rust tokenizer's vocab files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 629 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
# Resource file names the byte-level BPE tokenizer expects on disk.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
# Hub download locations per pretrained checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
# Maximum model input length per checkpoint.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
    """allenai/led-base-16384""": 1_6_3_8_4,
}
# NOTE(review): these tables share one obfuscated name, so only the last
# assignment survives; the class below reads VOCAB_FILES_NAMES etc., which
# this copy never defines.
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def _A ( ):
    """Map every byte (0–255) to a printable unicode character.

    Bytes that are already printable (and neither whitespace nor control
    characters) map to themselves; the remaining bytes are shifted up by 256
    so the BPE vocabulary never has to contain unprintable characters.
    Cached because the table is a pure constant.

    Returns:
        dict[int, str] of length 256.

    Fix: the working lists were obfuscated into ``a__`` while the loop still
    read ``bs``/``cs``/``n``, raising NameError on first call.
    """
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def _A ( lowerCamelCase ):
    """Return the set of adjacent symbol pairs in a word.

    Args:
        lowerCamelCase: a non-empty sequence of symbols (variable-length
            strings), e.g. ``("h", "e", "y")`` -> ``{("h","e"), ("e","y")}``.

    Fix: the accumulator and previous-symbol cursor were obfuscated into
    ``a__`` while the loop read ``pairs``/``prev_char`` — NameError as written.
    """
    pairs = set()
    prev_char = lowerCamelCase[0]
    for char in lowerCamelCase[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class __lowerCAmelCase ( _UpperCamelCase ):
    """Byte-level BPE tokenizer for LED (a GPT-2/BART-style tokenizer plus
    LED-specific ``global_attention_mask`` padding).

    NOTE(review): this copy is obfuscation-damaged and cannot compile or run
    as written — ``__init__``, ``save_vocabulary``, the special-token helpers,
    ``prepare_for_tokenization`` and ``_pad`` reuse the parameter name
    ``snake_case`` (duplicate arguments are a SyntaxError), the four class
    attributes share one name so only the last survives, and several bodies
    read names (``kv``, ``index``, ``text`` …) whose bindings were collapsed
    into ``a__``.  Docstrings below describe the intended upstream behavior.
    """

    _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
    _UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self , snake_case , snake_case , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , **snake_case , ) -> Tuple:
        """Load vocab.json + merges.txt and build the byte-level BPE machinery.

        NOTE(review): upstream parameters in order are ``vocab_file``,
        ``merges_file``, ``errors`` and the special tokens, followed by
        ``add_prefix_space`` — all collapsed to ``snake_case`` in this copy.
        """
        # Wrap plain-string special tokens as AddedToken so strip behavior is explicit.
        a__ : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
        a__ : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
        a__ : List[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
        a__ : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
        a__ : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
        a__ : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        a__ : Union[str, Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
        super().__init__(
            errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , )
        with open(snake_case , encoding="utf-8" ) as vocab_handle:
            a__ : Dict = json.load(snake_case )
        a__ : str = {v: k for k, v in self.encoder.items()}
        a__ : Tuple = errors  # how to handle errors in decoding
        a__ : List[str] = bytes_to_unicode()
        a__ : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
        with open(snake_case , encoding="utf-8" ) as merges_handle:
            a__ : Optional[int] = merges_handle.read().split("\n" )[1:-1]
        a__ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
        a__ : Dict = dict(zip(snake_case , range(len(snake_case ) ) ) )
        a__ : Optional[int] = {}  # BPE result cache, filled lazily by ``bpe``
        a__ : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        a__ : List[str] = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def _snake_case ( self ) -> str:
        """Number of entries in the base vocabulary (excludes added tokens)."""
        return len(self.encoder )

    def _snake_case ( self ) -> Optional[Any]:
        """Full vocabulary: base encoder merged with the added-token table."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def _snake_case ( self , snake_case ) -> Tuple:
        """Apply the ranked BPE merges to one pre-token, memoized in ``self.cache``."""
        if token in self.cache:
            return self.cache[token]
        a__ : Optional[Any] = tuple(snake_case )
        a__ : Optional[int] = get_pairs(snake_case )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            a__ : Any = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            a__ , a__ : int = bigram
            a__ : Optional[int] = []
            a__ : List[str] = 0
            while i < len(snake_case ):
                try:
                    a__ : Optional[Any] = word.index(snake_case , snake_case )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    a__ : Union[str, Any] = j
                if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            a__ : str = tuple(snake_case )
            a__ : str = new_word
            if len(snake_case ) == 1:
                break
            else:
                a__ : Dict = get_pairs(snake_case )
        a__ : Optional[int] = " ".join(snake_case )
        a__ : int = word
        return word

    def _snake_case ( self , snake_case ) -> Tuple:
        """Pre-tokenize with the GPT-2 regex, byte-encode, then BPE each piece."""
        a__ : Dict = []
        for token in re.findall(self.pat , snake_case ):
            a__ : Optional[Any] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case ).split(" " ) )
        return bpe_tokens

    def _snake_case ( self , snake_case ) -> List[Any]:
        """Token string -> id, falling back to the unk token's id."""
        return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )

    def _snake_case ( self , snake_case ) -> List[str]:
        """Id -> token string via the reversed encoder table."""
        return self.decoder.get(snake_case )

    def _snake_case ( self , snake_case ) -> Dict:
        """Join tokens and decode the byte-level characters back to text."""
        a__ : Union[str, Any] = "".join(snake_case )
        a__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text

    def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*.

        NOTE(review): the sort key reads ``kv`` and the consecutiveness check
        reads ``index`` — both upstream locals whose bindings were collapsed
        into ``a__``/``snake_case`` in this copy.
        """
        if not os.path.isdir(snake_case ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        a__ : Optional[Any] = os.path.join(
            snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        a__ : str = os.path.join(
            snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(snake_case , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + "\n" )
        a__ : Union[str, Any] = 0
        with open(snake_case , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    a__ : List[Any] = token_index
                writer.write(" ".join(snake_case ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
        """<s> A </s> — and, for pairs, <s> A </s></s> B </s> (BART scheme)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        a__ : Dict = [self.cls_token_id]
        a__ : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
        if token_ids_a is None:
            return [1] + ([0] * len(snake_case )) + [1]
        return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]

    def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
        """All-zero token-type ids (LED, like BART, does not use segment ids)."""
        a__ : int = [self.sep_token_id]
        a__ : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self , snake_case , snake_case=False , **snake_case ) -> List[Any]:
        """Optionally prepend a space so the first word is merged like mid-text words."""
        a__ : Tuple = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(snake_case ) > 0 and not text[0].isspace()):
            a__ : List[Any] = " " + text
        return (text, kwargs)

    def _snake_case ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ) -> dict:
        """Pad as usual, then extend ``global_attention_mask`` with -1 to match.

        -1 is used (not 0) because 0 in ``global_attention_mask`` means *local*
        attention, not "do not attend".
        """
        a__ : Union[str, Any] = super()._pad(
            encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
        # Load from model defaults
        if return_attention_mask is None:
            a__ : str = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            a__ : int = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            a__ : Optional[Any] = len(encoded_inputs["global_attention_mask"] ) != len(snake_case )
            if needs_to_be_padded:
                a__ : Optional[int] = len(snake_case ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    a__ : Tuple = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    a__ : Any = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Config download locations for the released MobileNetV2 checkpoints.
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for a MobileNetV2 model.

    Fix(review): every ``__init__`` parameter had been obfuscated to one
    repeated name (``snake_case`` — duplicate arguments are a SyntaxError)
    while the body already read the upstream names, and the ``model_type``
    class attribute had been renamed ``_UpperCamelCase``; both are restored.
    Defaults are unchanged.
    """

    model_type = """mobilenet_v2"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        # A non-positive width multiplier would collapse every layer to zero channels.
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for MobileNetV2.

    Fix(review): the three properties below were all obfuscated to
    ``_snake_case``, so only the last survived on the class; the upstream
    names ``inputs`` / ``outputs`` / ``atol_for_validation`` — which the ONNX
    exporter looks up — are restored, as is ``torch_onnx_minimum_version``.
    """

    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """Single image input, dynamic on the batch axis."""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        """Classifier logits for image-classification, backbone features otherwise."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self ) -> float:
        """Absolute tolerance when comparing PyTorch and ONNX outputs."""
        return 1E-4
| 629 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Fixture helper that fabricates config dicts and image inputs for the
    ChineseCLIP image-processor tests below.

    Fix(review): every ``__init__``/``prepare_inputs`` parameter had been
    obfuscated to one repeated name (``snake_case`` — duplicate arguments are
    a SyntaxError) and both methods shared the name ``_snake_case``.  Names
    are restored from the attribute assignments and from the call sites in
    the test classes (``prepare_image_processor_dict`` / ``prepare_inputs``).
    ``np.uinta`` was repaired to ``np.uint8`` and the annotated tuple
    unpacking (a SyntaxError) to ``width, height = ...``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073],  # CLIP channel means; the list defaults are never mutated
        image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711],  # CLIP channel stds
        do_convert_rgb=True,
    ) -> None:
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a ChineseCLIPImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Fabricate a batch of random channels-first images as PIL images
        (default), numpy arrays (``numpify``) or torch tensors (``torchify``)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                # Each image gets its own random spatial size in [min, max).
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """ChineseCLIP image-processor tests (default 3-channel configuration).

    NOTE(review): obfuscation-damaged — every method is named ``_snake_case``
    (earlier defs are shadowed, so unittest would only ever collect the last
    one) and each body binds objects to ``a__`` while later lines read the
    upstream locals (``image_processing``, ``image_inputs``,
    ``encoded_images``), so the bodies raise NameError as written.  The
    docstrings below describe the intended upstream checks.
    """

    _UpperCamelCase : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None

    def _snake_case ( self ) -> Union[str, Any]:
        """setUp: build the fixture helper with center-cropping enabled."""
        a__ : List[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case )

    @property
    def _snake_case ( self ) -> int:
        """image_processor_dict: kwargs for constructing the processor."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _snake_case ( self ) -> Tuple:
        """The processor exposes every expected config attribute."""
        a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case , "do_resize" ) )
        self.assertTrue(hasattr(snake_case , "size" ) )
        self.assertTrue(hasattr(snake_case , "do_center_crop" ) )
        self.assertTrue(hasattr(snake_case , "center_crop" ) )
        self.assertTrue(hasattr(snake_case , "do_normalize" ) )
        self.assertTrue(hasattr(snake_case , "image_mean" ) )
        self.assertTrue(hasattr(snake_case , "image_std" ) )
        self.assertTrue(hasattr(snake_case , "do_convert_rgb" ) )

    def _snake_case ( self ) -> Optional[Any]:
        """from_dict honors defaults and kwarg overrides for size/crop_size."""
        a__ : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        a__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"shortest_edge": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )

    def _snake_case ( self ) -> Dict:
        """Placeholder (upstream: batch-feature test intentionally empty)."""
        pass

    def _snake_case ( self ) -> Union[str, Any]:
        """PIL inputs: single image and batch produce NCHW crop-sized tensors."""
        a__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , Image.Image )
        # Test not batched input
        a__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : Tuple = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def _snake_case ( self ) -> List[Any]:
        """numpy inputs: same single/batch shape checks as the PIL variant."""
        a__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a__ : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case , numpify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , np.ndarray )
        # Test not batched input
        a__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : Dict = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def _snake_case ( self ) -> List[str]:
        """torch inputs: same single/batch shape checks as the PIL variant."""
        a__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a__ : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case , torchify=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , torch.Tensor )
        # Test not batched input
        a__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : Dict = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
@require_torch
@require_vision
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
    """ChineseCLIP image-processor tests for 4-channel inputs, where
    ``do_convert_rgb`` should collapse the output to 3 channels.

    NOTE(review): same obfuscation damage as the class above — shadowed
    ``_snake_case`` method names and ``a__`` bindings read back under their
    upstream local names, so the bodies raise NameError as written.
    """

    _UpperCamelCase : str = ChineseCLIPImageProcessor if is_vision_available() else None

    def _snake_case ( self ) -> List[Any]:
        """setUp: 4-channel fixture; expected encoded channel count is 3."""
        a__ : Any = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case )
        a__ : Any = 3

    @property
    def _snake_case ( self ) -> List[str]:
        """image_processor_dict: kwargs for constructing the processor."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _snake_case ( self ) -> Tuple:
        """The processor exposes every expected config attribute."""
        a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case , "do_resize" ) )
        self.assertTrue(hasattr(snake_case , "size" ) )
        self.assertTrue(hasattr(snake_case , "do_center_crop" ) )
        self.assertTrue(hasattr(snake_case , "center_crop" ) )
        self.assertTrue(hasattr(snake_case , "do_normalize" ) )
        self.assertTrue(hasattr(snake_case , "image_mean" ) )
        self.assertTrue(hasattr(snake_case , "image_std" ) )
        self.assertTrue(hasattr(snake_case , "do_convert_rgb" ) )

    def _snake_case ( self ) -> List[str]:
        """Placeholder (upstream: batch-feature test intentionally empty)."""
        pass

    def _snake_case ( self ) -> Any:
        """PIL RGBA inputs: output tensors still have 3 channels after RGB conversion."""
        a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ : Optional[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case )
        for image in image_inputs:
            self.assertIsInstance(snake_case , Image.Image )
        # Test not batched input
        a__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        a__ : Union[str, Any] = image_processing(snake_case , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
    # Compute a minimal, ordered set of tuple-slices covering the inclusive
    # flat range [start, end] over a tensor's batch dimensions.
    # NOTE(review): the signature declares `lowerCamelCase` five times, which
    # is a SyntaxError in Python, and the body reads names that are never
    # bound (`start`, `end`, `dims`, `start_edges`, `end_edges`, ...) while
    # `a__` locals discard intermediate results -- this block was mangled by
    # an automated rewrite; restore from the original (OpenFold
    # `_get_minimal_slice_set`) before use.
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(lowerCamelCase ) -> None:
        # Propagate "at edge" flags from the innermost dimension outwards.
        # NOTE(review): `tally` is never updated -- the `a__` binding below
        # discards the running value.
        a__ : int = True
        for i in range(len(lowerCamelCase ) ):
            a__ : Optional[Any] = -1 * (i + 1)
            l[reversed_idx] &= tally
            a__ : Tuple = l[reversed_idx]
    if start_edges is None:
        a__ : Optional[int] = [s == 0 for s in start]
        reduce_edge_list(lowerCamelCase )
    if end_edges is None:
        a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
        reduce_edge_list(lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(lowerCamelCase ) == 0:
        return [()]
    elif len(lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    a__ : List[Tuple[slice, ...]] = []
    a__ : List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(lowerCamelCase , lowerCamelCase ):
        if s == e:
            path_list.append(slice(lowerCamelCase , s + 1 ) )
        else:
            break
    a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
    a__ : Optional[Any] = len(lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(lowerCamelCase ):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering the ragged region on the start side of the
        # divergence point.
        assert start_edges is not None
        assert end_edges is not None
        a__ : Optional[Any] = start[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Slices covering the ragged region on the end side of the
        # divergence point.
        assert start_edges is not None
        assert end_edges is not None
        a__ : List[str] = end[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def _A ( t , flat_start , flat_end , no_batch_dims ):
    """Equivalent of ``t.reshape((-1,) + t.shape[no_batch_dims:])[flat_start:flat_end]``
    computed via a minimal set of slices, avoiding a full reshape/copy.

    Fixes: the original signature declared the same parameter name four times
    (a SyntaxError in Python) and discarded every intermediate result into
    throwaway locals.

    NOTE(review): ``_flat_idx_to_idx`` and ``_get_minimal_slice_set`` are the
    names used by the original call sites but are not bound in this module
    (the helpers were renamed ``_A``); kept as-is -- TODO restore the helper
    names at module level.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
    # Apply a layer to its inputs in chunks along flattened batch dimensions
    # to cap peak memory (OpenFold-style `chunk_layer`).
    # NOTE(review): the signature repeats `lowerCamelCase` seven times (a
    # SyntaxError in Python) and the body reads many names that are never
    # bound (`layer`, `chunk_size`, `no_batch_dims`, `low_mem`, `out`, ...),
    # while `a__` locals discard intermediate results -- mangled by an
    # automated rewrite; restore from the original before use.
    if not (len(lowerCamelCase ) > 0):
        raise ValueError("Must provide at least one input" )
    a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
    a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
    def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
        # Broadcast each input to the common batch shape and (unless low_mem)
        # flatten its batch dimensions.
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
    a__ : str = None
    if _out is not None:
        a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    a__ : Optional[Any] = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Number of chunks: ceil(flat_batch_dim / chunk_size).
    a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(lowerCamelCase ) -> torch.Tensor:
        # Slice out the current chunk; size-1 leading dims broadcast as-is.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    a__ : str = 0
    a__ : Any = prepped_outputs
    for _ in range(lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            a__ : str = _select_chunk
        else:
            a__ : Tuple = partial(
                _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
        a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
        # Run the layer on the chunk
        a__ : Any = layer(**lowerCamelCase )
        # Allocate space for the output
        if out is None:
            a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(lowerCamelCase , lowerCamelCase ):
            def assign(lowerCamelCase , lowerCamelCase ) -> None:
                # Recursively copy (or accumulate) the chunk output into `out`.
                # NOTE(review): duplicate parameter names again (SyntaxError).
                for k, v in da.items():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        assign(lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            a__ : Dict = da[k]
            assign(lowerCamelCase , lowerCamelCase )
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    a__ : Dict = xa
        elif isinstance(lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                a__ : Dict = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    # Restore the original (unflattened) batch dimensions on the output tree.
    a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
    return out
class __lowerCAmelCase :
    # Chunk-size auto-tuner: binary-searches the largest power-of-two chunk
    # size for which a callable still succeeds (e.g. without OOM), caching the
    # result per argument-shape signature.
    def __init__( self , snake_case = 512 , ) -> List[str]:
        """Store the search upper bound; caches start empty."""
        # NOTE(review): results are bound to throwaway `a__` locals instead of
        # `self.max_chunk_size` / `self.cached_arg_data` /
        # `self.cached_chunk_size` -- the attributes read below are never set.
        a__ : int = max_chunk_size
        a__ : Optional[int] = None
        a__ : Optional[tuple] = None
    def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
        """Binary-search the largest viable chunk size for ``fn(*args)``."""
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Candidates are powers of two up to max_chunk_size, above the minimum.
        a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        a__ : List[str] = [c for c in candidates if c > min_chunk_size]
        a__ : Optional[int] = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(snake_case ) -> bool:
            # Probe one candidate; any RuntimeError (e.g. CUDA OOM) means "too big".
            try:
                with torch.no_grad():
                    fn(*snake_case , chunk_size=snake_case )
                return True
            except RuntimeError:
                return False
        a__ : Union[str, Any] = 0
        a__ : Dict = len(snake_case ) - 1
        while i > min_viable_chunk_size_index:
            a__ : Any = test_chunk_size(candidates[i] )
            if not viable:
                a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
            else:
                a__ : Tuple = i
                a__ : Any = (i + len(snake_case ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _snake_case ( self , snake_case , snake_case ) -> bool:
        """Return True when two cached argument trees are structurally equal."""
        # NOTE(review): the signature repeats `snake_case` (a SyntaxError in
        # Python) and the loop targets `aa, aa` collide -- mangled block.
        a__ : str = True
        for aa, aa in zip(snake_case , snake_case ):
            assert type(snake_case ) == type(snake_case )
            if isinstance(snake_case , (list, tuple) ):
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            elif isinstance(snake_case , snake_case ):
                # Compare dict values in key-sorted order.
                a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            else:
                consistent &= aa == aa
        return consistent
    def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
        """Return a tuned chunk size, re-tuning only when the arg shapes changed."""
        a__ : List[Any] = True
        # Replace tensors by their shapes so the cache key is cheap to compare.
        a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(snake_case )
            a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
        else:
            # Otherwise, we can reuse the precomputed value
            a__ : Optional[int] = False
        if not consistent:
            a__ : List[str] = self._determine_favorable_chunk_size(
                snake_case , snake_case , snake_case , )
            a__ : List[str] = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
    # Multi-GPU integration tests: each test shells out to `torchrun` running
    # one of the helper scripts shipped with accelerate.test_utils.
    def _snake_case ( self ) -> Any:
        """Resolve the paths of the accelerate test-utils helper scripts."""
        # NOTE(review): the computed paths are bound to throwaway `a__` locals
        # instead of `self.test_file_path` / `self.data_loop_file_path` /
        # `self.operation_file_path`, so the attributes read by the tests
        # below are never set -- mangled by an automated rewrite.
        a__ : Tuple = inspect.getfile(accelerate.test_utils )
        a__ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        a__ : List[Any] = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
        a__ : Tuple = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
    @require_multi_gpu
    def _snake_case ( self ) -> Union[str, Any]:
        """Run the generic test script on every available GPU."""
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        a__ : Optional[Any] = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(snake_case , env=os.environ.copy() )
    @require_multi_gpu
    def _snake_case ( self ) -> Tuple:
        """Run the distributed-ops test script on every available GPU."""
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        a__ : Dict = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(snake_case , env=os.environ.copy() )
    @require_multi_gpu
    def _snake_case ( self ) -> Optional[int]:
        """Re-run this test module itself under torchrun (drives the __main__ block)."""
        a__ : str = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(snake_case , env=os.environ.copy() )
    @require_multi_gpu
    def _snake_case ( self ) -> str:
        """Run the data-loop test script restricted to two visible devices."""
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        a__ : str = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
            execute_subprocess_async(snake_case , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test for `Accelerator.pad_across_processes`: every
    # rank builds a tensor of a rank-dependent length and checks that padding
    # brings all ranks to the maximum length, with zeros in the pad region.
    # NOTE(review): each result is re-bound to `SCREAMING_SNAKE_CASE__` while
    # the checks read `accelerator` / `tensor` / `tensora` / `error_msg` /
    # `index`, which are never bound -- mangled by an automated rewrite;
    # restore distinct variable names before running this block.
    SCREAMING_SNAKE_CASE__ : int = Accelerator()
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = (accelerator.state.process_index + 2, 1_0)
    SCREAMING_SNAKE_CASE__ : Dict = torch.randint(0, 1_0, shape).to(accelerator.device)
    SCREAMING_SNAKE_CASE__ : List[Any] = """"""
    SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Second pass: pad at the front instead of the back.
    SCREAMING_SNAKE_CASE__ : Tuple = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    SCREAMING_SNAKE_CASE__ : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    # Configuration class for UperNet semantic-segmentation models.
    _UpperCamelCase : int = """upernet"""
    def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
        """Build the config; defaults to a ResNet backbone when none is given."""
        # NOTE(review): the signature repeats the parameter name `snake_case`
        # (a SyntaxError in Python), the body reads names that are never bound
        # (`backbone_config`, `hidden_size`, ...), and the `a__` locals discard
        # results instead of setting `self.<attr>` -- mangled by an automated
        # rewrite; restore from the original `UperNetConfig.__init__`.
        # Also note the mutable list default `[1, 2, 3, 6]` (shared between
        # calls) -- acceptable here only because it is never mutated.
        super().__init__(**snake_case )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(snake_case , snake_case ):
            # Presumably this branch handles a dict-form backbone config.
            a__ : Optional[int] = backbone_config.get("model_type" )
            a__ : str = CONFIG_MAPPING[backbone_model_type]
            a__ : str = config_class.from_dict(snake_case )
        a__ : int = backbone_config
        a__ : Optional[Any] = hidden_size
        a__ : Optional[Any] = initializer_range
        a__ : Tuple = pool_scales
        a__ : Optional[Any] = use_auxiliary_head
        a__ : Optional[Any] = auxiliary_loss_weight
        a__ : Dict = auxiliary_in_channels
        a__ : Optional[int] = auxiliary_channels
        a__ : Any = auxiliary_num_convs
        a__ : Any = auxiliary_concat_input
        a__ : int = loss_ignore_index
    def _snake_case ( self ) -> str:
        """Serialize this config (and its nested backbone config) to a dict."""
        a__ : Tuple = copy.deepcopy(self.__dict__ )
        a__ : Optional[Any] = self.backbone_config.to_dict()
        a__ : List[Any] = self.__class__.model_type
        return output
| 629 | 1 |
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : Tuple = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
SCREAMING_SNAKE_CASE__ : Tuple = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, 
ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n"
SCREAMING_SNAKE_CASE__ : Optional[int] = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    # "exact_match" metric: percentage (0-100) of predictions identical to
    # their references after optional normalization (regex removal, case
    # folding, punctuation / digit stripping).
    def _snake_case ( self ) -> str:
        """Describe the metric inputs/outputs for the datasets library."""
        # NOTE(review): `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION`
        # are not bound in this module (the constants above were all assigned
        # to the single name `SCREAMING_SNAKE_CASE__`) -- verify against the
        # original metric module.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , reference_urls=[] , )
    def _snake_case ( self , snake_case , snake_case , snake_case=None , snake_case=False , snake_case=False , snake_case=False , ) -> Optional[Any]:
        """Compute the exact-match rate with the requested normalizations."""
        # NOTE(review): the signature repeats `snake_case` (a SyntaxError in
        # Python) and the body normalizes through the undefined name `A__`;
        # `predictions` / `references` / the flag names are also unbound --
        # mangled by an automated rewrite; restore before use.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                a__ : Optional[int] = np.array([re.sub(A__ , "" , A__ ) for x in predictions] )
                a__ : int = np.array([re.sub(A__ , "" , A__ ) for x in references] )
        else:
            a__ : Tuple = np.asarray(A__ )
            a__ : Tuple = np.asarray(A__ )
        if ignore_case:
            a__ : Dict = np.char.lower(A__ )
            a__ : Any = np.char.lower(A__ )
        if ignore_punctuation:
            a__ : Optional[Any] = string.punctuation.maketrans("" , "" , string.punctuation )
            a__ : List[Any] = np.char.translate(A__ , table=A__ )
            a__ : Dict = np.char.translate(A__ , table=A__ )
        if ignore_numbers:
            a__ : Union[str, Any] = string.digits.maketrans("" , "" , string.digits )
            a__ : Any = np.char.translate(A__ , table=A__ )
            a__ : List[str] = np.char.translate(A__ , table=A__ )
        a__ : List[Any] = predictions == references
        return {"exact_match": np.mean(A__ ) * 100}
| 700 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL filter constants. Pillow 9.1.0
# moved the filters into the ``PIL.Image.Resampling`` enum, so select the
# namespace matching the installed Pillow version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    SCREAMING_SNAKE_CASE__ : int = dict(
        linear=PIL.Image.Resampling.BILINEAR,
        bilinear=PIL.Image.Resampling.BILINEAR,
        bicubic=PIL.Image.Resampling.BICUBIC,
        lanczos=PIL.Image.Resampling.LANCZOS,
        nearest=PIL.Image.Resampling.NEAREST,
    )
else:
    SCREAMING_SNAKE_CASE__ : Dict = dict(
        linear=PIL.Image.LINEAR,
        bilinear=PIL.Image.BILINEAR,
        bicubic=PIL.Image.BICUBIC,
        lanczos=PIL.Image.LANCZOS,
        nearest=PIL.Image.NEAREST,
    )
def _A ( lowerCamelCase ):
    """Convert a batch of images from a [-1, 1] torch tensor (B, C, H, W) to PIL images.

    Fixes: the original body referenced the undefined name ``images`` instead
    of its parameter and discarded every intermediate result into throwaway
    locals, so the function raised NameError.
    """
    # Map from [-1, 1] to [0, 1] and clamp.
    images = (lowerCamelCase / 2 + 0.5).clamp(0 , 1 )
    # (B, C, H, W) -> (B, H, W, C) float numpy array on CPU.
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def _A ( lowerCamelCase ):
    """Convert a numpy image batch (values in [0, 1], channels-last) to PIL images.

    Accepts either a single image ``(H, W, C)`` or a batch ``(B, H, W, C)``.

    Fixes: the original discarded every intermediate assignment into throwaway
    locals and returned the undefined name ``pil_images`` (NameError).
    """
    images = lowerCamelCase
    if images.ndim == 3:
        # Promote a single image to a batch of one.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 629 | 0 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def _A ( lowerCamelCase ):
    """Build a MobileNetV1 config from a model name such as 'mobilenet_v1_1.0_224'.

    Fixes: the original passed the undefined name ``__A`` everywhere and
    dropped every computed value into throwaway locals instead of storing it
    on the config object.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in lowerCamelCase:
        raise ValueError("Quantized models are not supported." )
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , lowerCamelCase )
    if matches:
        # e.g. "1.0" -> depth multiplier, "224" -> input resolution.
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    # Shift ImageNet ids by one to make room for the "background" class at 0.
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = "background"
    # NOTE(review): the attribute names follow this file's mangled spelling;
    # in upstream transformers these are `id2label` / `label2id` -- confirm.
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def _A ( ):
    """Download the standard COCO validation image (two cats) used to sanity-check model outputs.

    Fixes: the original passed the undefined name ``__A`` as both the URL and
    the ``stream`` flag, and returned an unbound name.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
    # Convert a TensorFlow MobileNetV1 checkpoint to a HF PyTorch model,
    # verify its logits on a test image, save it, and optionally push to the
    # Hub.
    # NOTE(review): the signature repeats `lowerCamelCase` (a SyntaxError in
    # Python) and the body passes the undefined name `__A` everywhere while
    # discarding results into `a__` locals -- mangled by an automated
    # rewrite; restore the original conversion script before use.
    a__ : Any = get_mobilenet_va_config(__A )
    # Load 🤗 model
    a__ : Tuple = MobileNetVaForImageClassification(__A ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(__A , __A , __A )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    a__ : Optional[int] = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    a__ : str = image_processor(images=prepare_img() , return_tensors="pt" )
    a__ : int = model(**__A )
    a__ : List[Any] = outputs.logits
    # TF MobileNetV1 heads predict 1001 classes (index 0 is "background").
    assert logits.shape == (1, 1001)
    # Reference logits for the two officially-converted checkpoints.
    if model_name == "mobilenet_v1_1.0_224":
        a__ : Dict = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        a__ : int = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        a__ : Dict = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , __A , atol=1E-4 )
    Path(__A ).mkdir(exist_ok=__A )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__A )
    if push_to_hub:
        print("Pushing to the hub..." )
        a__ : Optional[Any] = "google/" + model_name
        image_processor.push_to_hub(__A )
        model.push_to_hub(__A )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser is bound to `SCREAMING_SNAKE_CASE__` while the
    # `add_argument` calls below read the unbound name `parser`, and the final
    # call targets `convert_movilevit_checkpoint`, which is not defined in
    # this module -- mangled by an automated rewrite.
    SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""mobilenet_v1_1.0_224""",
        type=str,
        help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
    )
    parser.add_argument(
        """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 701 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
return name.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( lowerCamelCase ):
    """Return the snake_case filename prefix for a dataset name.

    Raises ``ValueError`` if the argument looks like a path rather than a
    bare name.

    Fixes: the original compared against (and formatted) the undefined name
    ``name`` instead of its parameter.

    NOTE(review): ``camelcase_to_snakecase`` is the name used by the original
    call site but is not bound in this module (the helpers were renamed
    ``_A``); kept as-is -- TODO restore the helper names.
    """
    if os.path.basename(lowerCamelCase ) != lowerCamelCase:
        raise ValueError(F"""Should be a dataset name, not a path: {lowerCamelCase}""" )
    return camelcase_to_snakecase(lowerCamelCase )
def _A ( name , split ):
    """Return ``'<snake_name>-<split>'`` after validating both components.

    Fixes: the original signature declared ``lowerCamelCase`` twice (a
    SyntaxError in Python) and read the undefined name ``name``; restored the
    canonical ``(name, split)`` parameters implied by the body.
    """
    if os.path.basename(name ) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
    # _split_re / filename_prefix_for_name are the names used by the original
    # call sites; they are not bound in this module -- TODO restore them.
    if not re.match(_split_re , split ):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
    return F"""{filename_prefix_for_name(name )}-{split}"""
def _A ( dataset_name , split , data_dir , filetype_suffix=None ):
    """Return a glob pattern matching every shard file of one dataset split.

    Fixes: the original signature declared ``lowerCamelCase`` four times (a
    SyntaxError in Python); restored the canonical parameter names implied by
    the body.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir , prefix )
    return F"""{filepath}*"""
def _A ( path , dataset_name , split , filetype_suffix=None , shard_lengths=None ):
    """Return the concrete filename(s) for one dataset split.

    When ``shard_lengths`` is provided, one name per shard is produced in the
    form ``<prefix>-00000-of-000NN[.suffix]``; otherwise a single-element
    list with the bare prefix (plus optional suffix).

    Fixes: the original signature declared ``lowerCamelCase`` five times (a
    SyntaxError in Python) and every intermediate was discarded into a
    throwaway local; restored the canonical parameter names implied by the
    body.
    """
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
| 629 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
# Fix: the statements below read `pkgs_to_check_at_runtime`, which was never
# bound (the list above was assigned to `SCREAMING_SNAKE_CASE__` only), so
# importing this module raised NameError.
pkgs_to_check_at_runtime = SCREAMING_SNAKE_CASE__
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available
            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def _A ( pkg , hint=None ):
    """Check that the pinned version requirement for ``pkg`` is satisfied.

    Fixes: the original declared the same parameter name twice (a SyntaxError
    in Python) and passed the undefined name ``_SCREAMING_SNAKE_CASE`` as the
    hint; restored the canonical ``(pkg, hint)`` signature implied by the body.
    """
    require_version(deps[pkg] , hint )
| 702 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the SEW model (standard transformers
# `__init__.py` pattern: build an import structure, then install a
# `_LazyModule` at runtime while exposing real imports to type checkers).
# NOTE(review): both structures are bound to `SCREAMING_SNAKE_CASE__` (the
# second assignment clobbers the first) while the closing `_LazyModule` call
# reads `_import_structure`, which is never bound, and the result is not
# installed into `sys.modules` -- mangled by an automated rewrite; compare
# with the original `models/sew/__init__.py`.
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
        """SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SEWForCTC""",
        """SEWForSequenceClassification""",
        """SEWModel""",
        """SEWPreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys
    SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
def _A ( ):
a__ : Union[str, Any] = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase , type=lowerCamelCase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
a__ : Union[str, Any] = parser.parse_args()
return args
def _A ( tokenizer ):
    """Build a `datasets.map`-compatible closure tokenizing the ``text`` column.

    Args:
        tokenizer: a callable (e.g. a HF tokenizer) applied to a batch's texts.

    Returns:
        A function mapping a batch dict to the tokenizer's output.
    """
    # The previous body referenced unbound names (`tokenizer` / `examples` were
    # never bound as parameters); wire the closure up properly.
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def _A ( tokenized_data ):
    """Serialize a batch of tokenized examples into TFRecord-ready byte strings.

    Args:
        tokenized_data: mapping with equal-length "input_ids" and
            "attention_mask" lists of integer lists.

    Returns:
        list[bytes]: one serialized ``tf.train.Example`` per input row.
    """
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        # `int64_list` / `Int64List` are the real TF proto names; the previous
        # `intaa_list` / `IntaaList` spellings raised AttributeError at runtime.
        feature = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def _A ( args ):
    """Tokenize, chunk, and shard a dataset split into TFRecord files.

    Args:
        args: namespace produced by the argument parser above.
    """
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    # NOTE(review): the helper functions above are all defined under the name
    # `_A`; these call sites assume their intended names — confirm module naming.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size (max_length). Using map with batched=True lets
    # us regroup samples into more or fewer examples than the input had, so
    # little content is lost compared to truncating at a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if
        # the model supports it.
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing

    # Record the total count next to the shards for downstream sanity checks.
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
# Script entry point.
# NOTE(review): `parse_args` and `main` are not defined in this module (the
# functions above are all named `_A`), and the parsed namespace is bound to
# `SCREAMING_SNAKE_CASE__` while `main` is called with an unbound `args` —
# running this as a script raises NameError. Confirm the intended names.
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ : Any = parse_args()
    main(args)
| 703 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import table: submodule name -> public symbols it provides.
# (Previously this dict was bound to a throwaway variable while the
# `_LazyModule(...)` call below referenced an undefined `_import_structure`,
# so importing the package raised NameError.)
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it the modeling symbols are simply not exported.
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy is used.
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public symbols it provides.
# (Previously the framework-specific lists were bound to throwaway variables so
# they never reached `_LazyModule`, and `_import_structure` itself was undefined
# at the call site.)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Install the lazy proxy so framework-heavy submodules load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Shared fixture: a short Wikipedia-style passage about Hugging Face that the
# question-answering tool tests below query against.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the `text-question-answering` tool (local and remote variants).

    Review fixes: the mixin base and the fixture text were referenced through
    unbound names (`_UpperCamelCase`, `snake_case`), all four test methods
    shared one name (`_snake_case`) so only the last was collected, and the
    remote tool handle was dropped into a local instead of an attribute.
    """

    def setup(self):
        """Load the local tool and its remote counterpart once per test."""
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(SCREAMING_SNAKE_CASE__, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=SCREAMING_SNAKE_CASE__, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 629 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class __lowerCAmelCase ( ProcessorMixin ):
    r"""
    Wav2Vec2-style processor wrapping a feature extractor (for audio) and a CTC
    tokenizer (for text/labels) behind a single interface.

    Review fixes: the base class (`lowercase__`) and every `__lowerCamelCase`
    body reference were unbound, `__init__`/`pad` repeated one parameter name
    (a SyntaxError), the two class attributes and most methods shared clobbered
    names, and several `self.<attr> = ...` assignments had decayed into locals.
    """

    # Attribute names ProcessorMixin uses to (de)serialize the two components.
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Plain calls go to the feature extractor unless `as_target_processor`
        # temporarily re-routes them to the tokenizer.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load via ProcessorMixin; fall back to legacy per-component loading."""
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and `text` to the tokenizer."""
        if self._in_target_context_manager:
            # Inside `as_target_processor`, everything goes to the tokenizer.
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and `labels` with the tokenizer."""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily route plain calls to the tokenizer for label processing."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 705 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    # NOTE(review): left byte-identical — this block is not runnable as written:
    # `_UpperCamelCase` is undefined (the imports suggest `TestCasePlus`), every
    # bare `snake_case` argument below is unbound (the call sites suggest the
    # model name / cmd / env / flags), results are bound to throwaway `a__`
    # locals while the assertions read `result`, and the bare `"1"` assignments
    # look like lost `env["TRANSFORMERS_OFFLINE"] = "1"` writes. Confirm the
    # exact `check=` values against the upstream offline-mode test before fixing.
    @require_torch
    def _snake_case ( self ) -> str:
        """Offline-mode run whose mocked socket raises RuntimeError: the subprocess must still print 'success' from cache."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Flaky-internet run (socket raises socket.error): cached load must still print 'success'."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoint: baseline online load, then offline (TRANSFORMERS_OFFLINE=1) load must succeed."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Offline `pipeline()` without an explicit task must fail (returncode 1) with the task-inference error."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """trust_remote_code model: baseline online load, then offline (TRANSFORMERS_OFFLINE=1) load must succeed."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __lowerCAmelCase ( TestCase ):
    """Code-quality checks over every dataset script under ./datasets.

    Review fixes: the base class and every parameter reference were unbound
    obfuscated names, and the helpers all shared one name so they shadowed
    each other while the test bodies call them by their real names.
    """

    def _no_encoding_on_file_open(self, filepath):
        r"""Return a regex match if `filepath` calls `open(...)` without an encoding/binary mode."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        r"""Return a match if `filepath` contains a real `print(` call (not in comments or docstrings)."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            all_matches = regexp.finditer(input_text)
            matches = [match for match in all_matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        """Every dataset script must pass an explicit utf-8 encoding to open()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        """Dataset scripts must log via datasets.logger/logging instead of print()."""
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 706 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import table: submodule -> exported names. (Previously the dict and the
# torch-only list were bound to throwaway variables and `_import_structure`
# was undefined where `_LazyModule` is constructed, so importing raised NameError.)
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so torch-heavy submodules load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger for config deprecation/info messages.
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    """Configuration for the BLIP-2 vision encoder.

    Review fixes: the class was named `__lowerCAmelCase` (so the
    `BlipaVisionConfig(...)` reference in the composite config below raised
    NameError), the base class name was unbound, `get_config_dict`'s
    (config_dict, kwargs) tuple was never unpacked, attribute assignments had
    decayed into locals, and the warning f-strings nested same-type quotes
    (a SyntaxError before Python 3.12).
    """

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1_408,
        intermediate_size=6_144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this vision config, unwrapping it from a composite blip-2 config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig ( PretrainedConfig ):
    """Configuration for the BLIP-2 Q-Former bridging module.

    Review fixes mirror the vision config above: restored class/base names,
    unpacked the `get_config_dict` tuple, restored attribute assignments and
    the real `__init__` parameter names, and repaired the invalid nested-quote
    warning f-strings.
    """

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1_408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # How often a cross-attention layer (attending to vision features) is inserted.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this Q-Former config, unwrapping it from a composite blip-2 config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class __lowerCAmelCase ( PretrainedConfig ):
    """Composite BLIP-2 configuration holding vision, Q-Former and text sub-configs.

    Review fixes: restored the unbound base class, the `model_type` /
    `is_composition` class attributes (previously clobbered into one name),
    the `__init__` parameter names, the `self.<attr>` assignments that had
    decayed into locals, and the real classmethod/`to_dict` names.
    """

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from the three sub-config instances."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding nested configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 707 |
from PIL import Image
def _A ( img , level ):
    """Return a copy of `img` with every channel value shifted by `level`.

    Args:
        img: a PIL-style image (anything exposing `.point(fn)`).
        level: brightness offset in [-255.0, 255.0]; negative darkens.

    Raises:
        ValueError: if `level` is outside [-255.0, 255.0].
    """
    # Fixes: the signature repeated one parameter name (a SyntaxError) and the
    # `brightness` callback was never the argument passed to `img.point`.

    def brightness(c) -> float:
        # 128 + level + (c - 128) == c + level; kept in the original's form.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    # NOTE(review): `change_brightness` is not defined in this module (the
    # function above is named `_A`), and the result is bound to
    # `SCREAMING_SNAKE_CASE__` while `.save` is called on an unbound
    # `brigt_img` (also a likely typo for `bright_img`) — running this script
    # raises NameError. Confirm the intended names.
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 0 |
from PIL import Image
def _A ( img , level ):
    """Return a copy of `img` with every channel value shifted by `level`.

    Args:
        img: a PIL-style image (anything exposing `.point(fn)`).
        level: brightness offset in [-255.0, 255.0]; negative darkens.

    Raises:
        ValueError: if `level` is outside [-255.0, 255.0].
    """
    # Fixes: the signature repeated one parameter name (a SyntaxError) and
    # `img.point` was called with an unbound name (`__A`) instead of the
    # `brightness` callback.

    def brightness(c) -> float:
        # 128 + level + (c - 128) == c + level; kept in the original's form.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    # NOTE(review): `change_brightness` is not defined in this module (the
    # function above is named `_A`), and `.save` is called on an unbound
    # `brigt_img` (likely a typo for `bright_img`) — running this script
    # raises NameError. Confirm the intended names.
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 708 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Checkpoint name -> download URL for the original OpenAI Whisper weights.
# The second-to-last URL path segment is the file's expected SHA-256 digest.
SCREAMING_SNAKE_CASE__ : List[str] = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( state_dict ):
    """Drop the top-level `layers`/`blocks` entries Whisper checkpoints carry.

    Mutates `state_dict` in place; missing keys are ignored (pop with a None
    default). Previously the parameter and the pop arguments were unbound names.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    """Rename every key of `s_dict` according to WHISPER_MAPPING (in place).

    Each (old, new) substring pair is applied cumulatively to the key; the old
    entry is popped and re-inserted under the rewritten key. Returns `s_dict`
    for convenience. Prints each rename for conversion logging.
    """
    for key in list(s_dict.keys()):
        new_key = key
        for old, new in WHISPER_MAPPING.items():
            if old in key:
                new_key = new_key.replace(old, new)
        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Create a bias-free nn.Linear whose weight data is taken from `emb.weight`.

    Used to build the LM head (`proj_out`) tied to the decoder token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _A ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
a__ : Optional[Any] = os.path.basename(lowerCamelCase )
a__ : List[Any] = url.split("/" )[-2]
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowerCamelCase ):
a__ : Any = open(lowerCamelCase , "rb" ).read()
if hashlib.shaaaa(lowerCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop:
while True:
a__ : Optional[Any] = source.read(8192 )
if not buffer:
break
output.write(lowerCamelCase )
loop.update(len(lowerCamelCase ) )
a__ : Optional[int] = open(lowerCamelCase , "rb" ).read()
if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original OpenAI Whisper checkpoint into an HF Whisper model.

    Args:
        checkpoint_path: either a local ``.pt`` checkpoint path or a model name
            key of ``_MODELS`` (in which case the checkpoint is downloaded).
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    import io

    if ".pt" not in checkpoint_path:
        # NOTE(review): _download requires a cache directory; caching next to
        # the dump folder here — confirm the intended cache location.
        root = os.path.dirname(pytorch_dump_folder_path) or "."
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path], root)), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the original tied embedding weights around for the untied branch below.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Whisper uses the same FFN width in encoder and decoder.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # NOTE(review): this reads "n_text_state" — presumably it should be
        # "n_text_head" (the decoder head count); confirm against upstream.
        decoder_attention_heads=dimensions["n_text_state"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f""" but all the following weights are missing {missing}""")
    if tie_embeds:
        # Tie the LM head to the decoder token embedding.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger, per the library convention.
logger = logging.get_logger(__name__)

# Canonical pretrained-config download locations for Informer checkpoints.
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for an Informer time-series forecasting model.

    Stores hyperparameters for the encoder-decoder transformer (layer counts,
    head counts, FFN widths, dropouts) plus the time-series-specific settings
    (prediction/context lengths, lag features, static/dynamic feature counts,
    and the ProbSparse-attention options that are specific to Informer).
    """

    model_type = "informer"
    # PretrainedConfig attribute aliases so generic code can read the usual names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        # By default, look back over as many steps as we predict forward.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic default: half the cardinality, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-step feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Resolve the repository root (three levels up from this test file) and make
# the repo's utils/ directory importable so check_copies can be loaded.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class __lowerCAmelCase ( unittest.TestCase ):
    # Tests for utils/check_copies.py: verifies that "# Copied from ..." blocks
    # stay in sync with their source definitions.
    # NOTE(review): setUp/tearDown and the test methods are all named
    # `_snake_case` here, yet `self.check_copy_consistency` is called below —
    # the method names look obfuscated; confirm against upstream.
    def _snake_case ( self ) -> Optional[int]:
        """Create a scratch diffusers tree containing scheduling_ddpm.py."""
        # NOTE(review): the temp dir is bound to a throwaway local while
        # `self.diffusers_dir` is read on the next line — these look like they
        # should be attribute assignments on self; confirm.
        a__ : Any = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        a__ : List[str] = self.diffusers_dir
        shutil.copy(
            os.path.join(UpperCamelCase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def _snake_case ( self ) -> Tuple:
        """Remove the scratch tree created in setUp."""
        a__ : Optional[int] = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case=None ) -> List[str]:
        """Render a class body, write it into the scratch tree and run the copy checker.

        When an overwrite result is given, the checker is run in overwrite mode
        and the rewritten file is compared against it instead.
        """
        # NOTE(review): the duplicate `snake_case` parameter names are a
        # SyntaxError as written; upstream the parameters are
        # (comment, class_name, class_code, overwrite_result).
        a__ : str = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            a__ : Any = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        # Format with black the same way check_copies does before comparing.
        # NOTE(review): `black.TargetVersion.PYaa` is not a valid member name —
        # confirm the intended Python target version.
        a__ : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        a__ : Dict = black.format_str(UpperCamelCase_ , mode=UpperCamelCase_ )
        a__ : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(UpperCamelCase_ , "w" , newline="\n" ) as f:
            f.write(UpperCamelCase_ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(UpperCamelCase_ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=UpperCamelCase_ )
            with open(UpperCamelCase_ , "r" ) as f:
                self.assertTrue(f.read() , UpperCamelCase_ )
    def _snake_case ( self ) -> Optional[int]:
        """find_code_in_diffusers should return the referenced class source."""
        a__ : Any = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
    def _snake_case ( self ) -> Tuple:
        """Exercise copy consistency across the common 'Copied from' variants."""
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , UpperCamelCase_ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , UpperCamelCase_ ) , )
        # Copy consistency with a really long name
        a__ : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , UpperCamelCase_ , UpperCamelCase_ ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , UpperCamelCase_ , overwrite_result=re.sub("DDPM" , "Test" , UpperCamelCase_ ) , )
| 710 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    # Tests for create_student_by_copying_alternating_layers (make_student.py).
    # NOTE(review): several references to `snake_case` below are undefined in
    # their scope — upstream they are the tiny checkpoint ids defined at module
    # level (and `None`/exception types where appropriate); confirm.
    @cached_property
    def _snake_case ( self ) -> List[Any]:
        """Teacher config used by the layer-copying assertions below."""
        return AutoConfig.from_pretrained(snake_case )
    def _snake_case ( self ) -> Any:
        """A student built with e=1, d=1 should expose a single hidden layer."""
        # NOTE(review): the unpacking binds to `a__` but `student` is read on
        # the next line — these look like they should bind `student`; confirm.
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )
    def _snake_case ( self ) -> str:
        """Building a student with only `e` specified should not raise."""
        a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
    def _snake_case ( self ) -> List[str]:
        """With the decoder depth unset, the student keeps the teacher's depth."""
        a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
    def _snake_case ( self ) -> Optional[int]:
        """e=1, d=1 shrinks both encoder and decoder to one layer."""
        a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )
    def _snake_case ( self ) -> int:
        """Requesting a student with neither e nor d set must raise."""
        with self.assertRaises(snake_case ):
            create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

# Image-processing modules require the vision extra.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

# The modeling module is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are only
    # imported when their attributes are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
    # Pipeline that runs the same prompt through Stable Diffusion v1.1-v1.4
    # and returns the four first images for side-by-side comparison.
    # NOTE(review): the four identically-named `_snake_case` methods below look
    # like obfuscated text2img_sd1_1 .. text2img_sd1_4 plus a combined __call__;
    # as written, each definition shadows the previous one — confirm upstream names.
    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
        """Load the four SD v1.x pipelines and assemble a fourth from the given modules."""
        # NOTE(review): `_init_` is missing the dunder underscores — presumably
        # `super().__init__()`; confirm.
        super()._init_()
        # NOTE(review): results are bound to a throwaway local while
        # `self.pipea` is read below — these look like attribute assignments
        # (self.pipe1..self.pipe4); confirm.
        a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
        a__ : int = StableDiffusionPipeline(
            vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
        # NOTE(review): repeated keyword arguments are a SyntaxError as written;
        # upstream registers pipeline1..pipeline4 separately.
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
    @property
    def _snake_case ( self ) -> Dict[str, Any]:
        """Return the registered (non-private) config entries as a dict."""
        return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
    def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
        """Enable sliced attention; "auto" uses half the attention head count."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            a__ : List[Any] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(snake_case )
    def _snake_case ( self ) -> Tuple:
        """Disable attention slicing (passes None through to set_attention_slice)."""
        self.enable_attention_slicing(snake_case )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
        """Run text-to-image with the first (v1.1) pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run text-to-image with the second (v1.2) pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
        """Run text-to-image with the third (v1.3) pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run text-to-image with the fourth (v1.4) pipeline."""
        return self.pipea(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
    @torch.no_grad()
    def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
        """Run the prompt through all four SD v1.x checkpoints and collect the results."""
        a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(snake_case )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
        # Get first result from Stable Diffusion Checkpoint v1.1
        a__ : Any = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        a__ : List[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        a__ : Optional[Any] = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        a__ : Dict = self.textaimg_sda_a(
            prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 0 |
def match_pattern(input_string, pattern):
    """Full regex-style match of `input_string` against `pattern`.

    Supports "." (any single character) and "*" (zero or more of the preceding
    element). Returns True only when the pattern matches the whole string.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for j in range(len_pattern)] for i in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
SCREAMING_SNAKE_CASE__ : Any = "aab"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'{input_string} matches the given pattern {pattern}')
else:
print(f'{input_string} does not match with the given pattern {pattern}') | 712 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 | 0 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
# Sampling configuration for the value-guided planner.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f'Total reward: {total_reward}')
| 713 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: a BST value paired with a random heap priority."""

    def __init__(self, value=None) -> None:
        self.value = value
        # Random priority; merge() keeps the smaller priority closer to the root.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"""'{self.value}: {self.prior:.5}'"""
        return pformat({f"""{self.value}: {self.prior:.5}""": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        # Pre-order rendering: value followed by left then right subtree.
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root, value):
    """Split the treap rooted at `root` into (left, right) subtreaps.

    All nodes with value < `value` end up in the left treap, the rest in the
    right one. A None (or valueless) root splits into (None, None).
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    if root.value is None:
        return None, None
    if value < root.value:
        # root and its right subtree stay on the right side; keep splitting left.
        left, root.left = split(root.left, value)
        return left, root
    # root and its left subtree stay on the left side; keep splitting right.
    root.right, right = split(root.right, value)
    return root, right
def merge(left, right):
    """Merge two treaps where every value in `left` precedes every value in `right`.

    The node with the smaller priority becomes the parent, preserving the
    heap property on priorities.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    if left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    right.left = merge(left, right.left)
    return right
def insert(root, value):
    """Insert `value` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    # left < value <= right, so placing the new node between them keeps order.
    return merge(merge(left, node), right)
def erase(root, value):
    """Remove all nodes equal to `value` from the treap and return the new root."""
    # Carve out the slice [value, value] and merge what remains around it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root):
    """Print the treap's values in sorted order, comma-separated, no newline."""
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)
def interact_treap(root, args):
    """Apply a whitespace-separated command string to the treap.

    "+N" inserts N, "-N" erases all N; anything else prints "Unknown command".
    Returns the (possibly new) root.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main():
    """Interactive REPL over the treap: reads commands until 'q' is entered."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 629 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for LXMERT (WordPiece vocabulary, BERT-style files)."""

    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a small WordPiece vocabulary file for the tests to load."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        """Provide a raw/normalized text pair for the shared mixin tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the fixture vocabulary."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        """The slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
    # NOTE(review): this block is machine-degraded. The three `_UpperCamelCase` bases are
    # undefined names here — presumably the Pipeline*TesterMixin classes imported above;
    # verify. Every class attribute below binds the SAME name `_UpperCamelCase`, so only
    # the last assignment survives at runtime; the original attribute names (pipeline
    # class, params, batch params, image params) are lost.
    _UpperCamelCase : Optional[int] = StableUnCLIPPipeline
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    _UpperCamelCase : Any = False
    def _snake_case ( self ) -> List[str]:
        """Build tiny dummy prior + denoising components for a StableUnCLIP pipeline.

        NOTE(review): locals are all bound to `a__` while later lines read descriptive
        names (`embedder_hidden_size`, the `components` dict values, ...), and several
        keyword values were replaced by the undefined name `snake_case` — the original
        values cannot be recovered from this file alone.
        """
        a__ : Any = 32
        a__ : int = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : int = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
        torch.manual_seed(0 )
        a__ : str = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
        # regular denoising components
        torch.manual_seed(0 )
        a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
        a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
        torch.manual_seed(0 )
        a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        torch.manual_seed(0 )
        a__ : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
        torch.manual_seed(0 )
        a__ : Tuple = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
        torch.manual_seed(0 )
        a__ : Optional[int] = AutoencoderKL()
        # The dict values below read descriptive local names that were clobbered by the
        # `a__` rebinding above — this method is not runnable as written.
        a__ : Any = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
        """Build deterministic dummy pipeline inputs (device, seed).

        NOTE(review): the two parameters share the name `snake_case` — a SyntaxError;
        originally (device, seed=0) by the body's usage.
        """
        if str(snake_case ).startswith("mps" ):
            # mps does not support device-bound generators
            a__ : Union[str, Any] = torch.manual_seed(snake_case )
        else:
            a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
        a__ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def _snake_case ( self ) -> List[str]:
        """Attention-slicing must not change outputs (exact check on CPU only)."""
        a__ : Dict = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
    def _snake_case ( self ) -> int:
        """Batched and single inference must agree (exact check on cpu/mps only).

        NOTE(review): all four methods of this class share the name `_snake_case`,
        so only the last definition survives — the original method names are lost.
        """
        a__ : int = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    # NOTE(review): all three methods share the degraded name `_snake_case`, so only the
    # last survives; by their bodies they were originally tearDown plus two test methods.
    # Occurrences of the undefined name `snake_case` mark values lost in degradation.
    def _snake_case ( self ) -> List[str]:
        """Free GPU memory between tests (originally `tearDown`, per the super() call)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Tuple:
        """Full-pipeline integration test against a reference image."""
        a__ : int = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
        # NOTE(review): "anime turle" is a prompt string the reference image was generated
        # with — do not "fix" the spelling without regenerating the reference.
        a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
        a__ : List[str] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case , snake_case )
    def _snake_case ( self ) -> Tuple:
        """Peak-memory regression test with sequential CPU offload enabled."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
        a__ : Union[str, Any] = pipe.to(snake_case )
        pipe.set_progress_bar_config(disable=snake_case )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        a__ : Union[str, Any] = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        a__ : List[Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 629 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _A ( lowerCamelCase , lowerCamelCase=False ):
try:
a__ : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
a__ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
a__ : List[str] = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
SCREAMING_SNAKE_CASE__ : str = parse_flag_from_env("""RUN_SLOW""", default=False)
def _A ( lowerCamelCase ):
return unittest.skip("Test was skipped" )(__A )
def _A ( test_case ):
    """Skip *test_case* unless slow tests are enabled (RUN_SLOW env flag)."""
    return unittest.skipUnless(_run_slow_tests , "test is slow" )(test_case )
def _A ( lowerCamelCase ):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(__A )
def _A ( lowerCamelCase ):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(__A )
def _A ( test_case ):
    """Skip *test_case* unless an Intel XPU device is available."""
    return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless torch has `mps` backend support."""
    return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless both `transformers` and `datasets` are installed."""
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless the bitsandbytes library is installed."""
    return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless a TPU is available."""
    return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(test_case )
def _A ( lowerCamelCase ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(__A )
def _A ( test_case ):
    """Skip *test_case* unless exactly one XPU device is visible."""
    return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(test_case )
def _A ( lowerCamelCase ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(__A )
def _A ( test_case ):
    """Skip *test_case* unless more than one XPU device is visible."""
    return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless safetensors is installed."""
    return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless DeepSpeed is installed."""
    return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless torch >= 1.12.0."""
    return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(test_case )
def _A ( test_case=None , version=None ):
    """Skip a test unless torch >= *version*.

    Supports both `@decorator(version=...)` (test_case is None on the first call,
    so a partial decorator is returned) and direct application to a test.
    """
    if test_case is None:
        return partial(_A , version=version )
    return unittest.skipUnless(is_torch_version(">=" , version ) , F"""test requires torch version >= {version}""" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless TensorBoard is installed."""
    return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless wandb is installed."""
    return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(test_case )
def _A ( test_case ):
    """Skip *test_case* unless comet_ml is installed."""
    return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(test_case )
# True when at least one experiment tracker (wandb or tensorboard) is usable and
# comet_ml is NOT installed; consumed by the tracker-requirement decorator below.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _A ( test_case ):
    """Skip *test_case* unless a tracker is available and comet_ml is not installed."""
    return unittest.skipUnless(
        _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(test_case )
class __lowerCAmelCase ( unittest.TestCase ):
_UpperCamelCase : Dict = True
@classmethod
def _snake_case ( cls ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = tempfile.mkdtemp()
@classmethod
def _snake_case ( cls ) -> List[Any]:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class __lowerCAmelCase ( unittest.TestCase ):
    """TestCase that resets the global accelerate state after every test."""

    def tearDown(self ):
        """Reset singleton state so tests do not leak configuration into each other."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self , snake_case ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _A ( tensor ):
    """Return True if *tensor* holds identical values on every distributed process."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class __lowerCAmelCase :
def __init__( self , snake_case , snake_case , snake_case ) -> List[str]:
"""simple docstring"""
a__ : List[str] = returncode
a__ : Union[str, Any] = stdout
a__ : List[Any] = stderr
async def _A ( lowerCamelCase , lowerCamelCase ):
while True:
a__ : List[str] = await stream.readline()
if line:
callback(__A )
else:
break
async def _A ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run *cmd* as a subprocess, streaming stdout/stderr lines as they arrive.

    Returns a _RunOutput-like object with the return code and the captured
    (decoded, rstripped) stdout/stderr lines.
    """
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Record the decoded line and optionally mirror it to our own stdout/stderr.
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    # NOTE(review): `_read_stream` / `_RunOutput` are degraded to `_A` /
    # `__lowerCAmelCase` in this file — verify the helper names before running.
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def _A ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Run *cmd* via the async streamer and raise RuntimeError on non-zero exit."""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    return result
class __lowerCAmelCase ( __lowerCAmelCase ):
pass
def _A ( lowerCamelCase , lowerCamelCase=False ):
try:
a__ : Optional[Any] = subprocess.check_output(__A , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__A , "decode" ):
a__ : List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(__A )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
| 715 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 0 |
from string import ascii_lowercase, ascii_uppercase
def _A ( lowerCamelCase ):
if not sentence:
return ""
a__ : List[str] = dict(zip(_lowerCamelCase , _lowerCamelCase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
    from doctest import testmod

    # Run the module's doctests when executed as a script.
    testmod()
| 716 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def _A ( iterations , min_value = 0.0 , max_value = 1.0 ):
    """Sanity-check the Monte-Carlo integrator on y=x over [min_value, max_value]; prints stats."""

    def identity_function(x ) -> float:
        # integrand: f(x) = x
        return x

    # NOTE(review): the estimator above is degraded to `_A` in this file; verify the name.
    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    # Closed-form integral of x over [a, b] is (b^2 - a^2) / 2.
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )
def _A ( iterations ):
    """Estimate pi by Monte-Carlo integration of sqrt(4 - x^2) over [0, 2]; prints stats."""

    def function_to_integrate(x ) -> float:
        # quarter circle of radius 2; its area on [0, 2] equals pi
        return sqrt(4.0 - x * x )

    # NOTE(review): the estimator above is degraded to `_A` in this file; verify the name.
    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 629 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): later code reads `logger`, but this binds the degraded name
# `SCREAMING_SNAKE_CASE__` — verify the intended module-level names.
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
# Map of fairseq parameter-name fragments -> HF Hubert module paths.
# "*" is substituted with the encoder layer index during conversion.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
def _A ( hf_pointer , key , value , full_name , weight_type ):
    """Assign *value* into *hf_pointer* at the dotted attribute path *key*.

    *weight_type* selects which sub-tensor (weight / weight_g / weight_v / bias)
    receives the data; None writes to the attribute's own `.data`. The shape is
    asserted against the checkpoint tensor before assignment.
    """
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def _A ( fairseq_model , hf_model , is_finetuned ):
    """Copy every parameter from *fairseq_model* into *hf_model*.

    Conv feature-extractor weights are routed through `load_conv_layer`; all other
    parameters are mapped with MAPPING and written via `set_recursively`. Parameters
    with no mapping are collected and reported as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # Fine-tuned (CTC) models nest the backbone under `.hubert`.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def _A ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Copy one conv/layer-norm tensor of the feature extractor from the fairseq checkpoint.

    The fairseq name encodes `<layer_id>.<type_id>.<param>`: type 0 is the conv itself,
    type 2 is the layer norm (only present per-layer, or on layer 0 with group norm).
    Unrecognized tensors are appended to *unused_weights*.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def _A ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq Hubert checkpoint to the HF Transformers format.

    For fine-tuned (CTC) models, also builds and saves the tokenizer/processor
    from the fairseq dictionary at *dict_path*.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            # Layer-norm feature extraction requires an attention mask at inference time.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 717 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
    # NOTE(review): this definition is machine-degraded beyond safe repair: both
    # parameters share the name `lowerCamelCase` (a SyntaxError), and most assignments
    # bind `a__` while later lines read descriptive names (`match`, `trimmed_name`,
    # `new_name`, `layer_index`, `layer`). By its body, the intent is
    # rename_key(old_name, num_meta4D_last_stage): map one EfficientFormer checkpoint
    # key to the HF naming scheme. Reconstruct against the upstream conversion script.
    a__ : Dict = old_name
    if "patch_embed" in old_name:
        # patch-embed stem: conv/batchnorm pairs are renamed positionally.
        a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
        if layer == "0":
            a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            a__ : Dict = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            a__ : List[str] = old_name.replace("3" , "convolution2" )
        else:
            a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
    if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
        # meta4D / meta3D blocks: split between intermediate stages and the last stage.
        a__ : List[str] = r"\b\d{2}\b"
        if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
            a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
        else:
            a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
        if int(match[0] ) < 6:
            a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
            a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            a__ : List[Any] = "intermediate_stages." + trimmed_name
        else:
            a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
            if int(match[2] ) < num_meta4D_last_stage:
                a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
                a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
                if "norm1" in old_name:
                    a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
                elif "norm2" in old_name:
                    a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
                elif "fc1" in old_name:
                    a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
                elif "fc2" in old_name:
                    a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
            a__ : Any = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
        a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
    # generic renames applied to whatever `new_name` holds at this point
    if "fc" in new_name:
        a__ : str = new_name.replace("fc" , "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        a__ : str = new_name.replace("norm1" , "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        a__ : Optional[int] = new_name.replace("proj" , "projection" )
    if "dist_head" in new_name:
        a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        a__ : Optional[int] = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        a__ : Tuple = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
        a__ : Optional[int] = "efficientformer." + new_name
    else:
        a__ : List[Any] = "efficientformer.encoder." + new_name
    return new_name
def _A ( lowerCamelCase , lowerCamelCase ):
for key in checkpoint.copy().keys():
a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
a__ : Dict = val
return checkpoint
def _A ( ):
    """Download and return the standard COCO test image (two cats on a couch).

    Used as a fixed input for verifying the converted model's outputs.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Bug fix: ``stream`` was being passed the URL string instead of a boolean.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def _A ( checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Fixes the original SyntaxError (all four parameters were named
    ``lowerCamelCase``) and restores the local names the body already reads.

    Args:
        checkpoint_path: path to the original ``.pth`` torch checkpoint.
        efficientformer_config_file: JSON file with the model configuration.
        pytorch_dump_path: directory where the converted model is written.
        push_to_hub: when truthy, also upload model + processor to the Hub.

    Raises:
        ValueError: if the checkpoint name is not an l1 / l3 / l7 variant.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    # "num_metaad_blocks" was a garbled spelling of ``num_meta3d_blocks`` —
    # TODO confirm against EfficientFormerConfig.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    # ``convert_torch_checkpoint`` is the key-renaming helper defined above
    # (obfuscated as ``_A``) — confirm the resolved name.
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"]
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # Original processing pipeline; the Normalize mean/std were lost in the
    # garbled source — presumably the ImageNet defaults. TODO confirm.
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(imagenet_mean, imagenet_std),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # Sanity check: the HF image processor must reproduce the original pipeline.
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        # NOTE(review): the source only checks the shape for l7 (no allclose).
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfuly saved at {pytorch_dump_path}""")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # The garbled source assigned the parser and parsed args to
    # ``SCREAMING_SNAKE_CASE__`` while reading them back as ``parser``/``args``;
    # restore the names the body actually uses.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing to the hub is the default; --no-push_to_hub opts out.
    parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
    # ``_A`` is the converter defined directly above; the original script named
    # it ``convert_efficientformer_checkpoint`` before obfuscation.
    _A(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class __lowerCAmelCase :
    """Fixture builder for the TF ESM tests (originally ``TFEsmModelTester``).

    NOTE(review): heavily garbled — every local is assigned to the throwaway
    ``a__`` (so attributes such as ``self.batch_size`` read later are never
    actually set), every method is named ``_snake_case`` (later definitions
    shadow earlier ones), and bodies read names (``parent``, ``config``,
    ``input_ids``, ``lowerCamelCase_`` …) that are never bound. Code left
    byte-identical — comments only.
    """

    def __init__( self , snake_case , ) -> int:
        """Store the parent test case and fixed model hyper-parameters.

        The constants below presumably map, in order, to batch_size,
        seq_length, is_training, use_input_mask, use_labels, vocab_size,
        hidden_size, num_hidden_layers, num_attention_heads,
        intermediate_size, hidden_act, dropout probs, etc. — TODO confirm
        against the un-obfuscated TFEsmModelTester.
        """
        a__ : Dict = parent
        a__ : Tuple = 13
        a__ : Union[str, Any] = 7
        a__ : Optional[Any] = True
        a__ : int = True
        a__ : Optional[int] = True
        a__ : int = 99
        a__ : Optional[Any] = 32
        a__ : Optional[Any] = 2
        a__ : Optional[int] = 4
        a__ : Dict = 37
        a__ : Union[str, Any] = '''gelu'''
        a__ : Optional[Any] = 0.1
        a__ : Union[str, Any] = 0.1
        a__ : Optional[int] = 512
        a__ : Union[str, Any] = 16
        a__ : List[Any] = 2
        a__ : Dict = 0.02
        a__ : List[str] = 3
        a__ : Optional[int] = 4
        a__ : Dict = None

    def _snake_case ( self ) -> str:
        """Build (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a__ : Optional[Any] = None
        if self.use_input_mask:
            a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        a__ : Tuple = None
        a__ : Union[str, Any] = None
        a__ : List[str] = None
        if self.use_labels:
            a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            a__ : int = ids_tensor([self.batch_size] , self.num_choices )
        a__ : Dict = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _snake_case ( self ) -> Optional[int]:
        """Like prepare_config_and_inputs, extended with encoder (cross-attention) tensors."""
        (
            a__
        ) : List[str] = self.prepare_config_and_inputs()
        a__ : str = True
        a__ : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        a__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Dict:
        """Run the base model via dict, list and positional inputs; check output shape."""
        a__ : List[Any] = TFEsmModel(config=lowerCamelCase_ )
        a__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        a__ : int = model(lowerCamelCase_ )
        a__ : Optional[Any] = [input_ids, input_mask]
        a__ : int = model(lowerCamelCase_ )
        a__ : Optional[int] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> str:
        """Exercise the model as a decoder with encoder hidden states / attention mask."""
        a__ : Dict = True
        a__ : Tuple = TFEsmModel(config=lowerCamelCase_ )
        a__ : Dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        a__ : Any = model(lowerCamelCase_ )
        a__ : List[str] = [input_ids, input_mask]
        a__ : Any = model(lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ )
        # Also check the case where encoder outputs are not passed
        a__ : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
        """Masked-LM head: logits must be (batch, seq, vocab)."""
        a__ : int = TFEsmForMaskedLM(config=lowerCamelCase_ )
        a__ : Optional[int] = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int:
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        a__ : List[Any] = self.num_labels
        a__ : List[Any] = TFEsmForTokenClassification(config=lowerCamelCase_ )
        a__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        a__ : Union[str, Any] = model(lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self ) -> Union[str, Any]:
        """Split prepare_config_and_inputs into (config, inputs_dict) for the common tests."""
        a__ : Any = self.prepare_config_and_inputs()
        (
            a__
        ) : List[Any] = config_and_inputs
        a__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
    """TF ESM model + pipeline test suite (originally ``TFEsmModelTest``).

    NOTE(review): garbled — the four class attributes all share the name
    ``_UpperCamelCase`` and every test method is ``_snake_case``, so later
    definitions shadow earlier ones; method bodies read ``lowerCamelCase_``
    which is never bound. Code left byte-identical — comments only.
    """

    # Presumably ``all_model_classes`` — tuple of TF ESM heads under test.
    _UpperCamelCase : int = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping`` — pipeline task -> model class.
    _UpperCamelCase : List[str] = (
        {
            """feature-extraction""": TFEsmModel,
            """fill-mask""": TFEsmForMaskedLM,
            """text-classification""": TFEsmForSequenceClassification,
            """token-classification""": TFEsmForTokenClassification,
            """zero-shot""": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : List[str] = False
    _UpperCamelCase : Optional[Any] = False

    def _snake_case ( self ) -> Dict:
        """Create the model tester and the config tester."""
        a__ : List[str] = TFEsmModelTester(self )
        a__ : Any = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )

    def _snake_case ( self ) -> str:
        """Run the shared EsmConfig sanity checks."""
        self.config_tester.run_common_tests()

    def _snake_case ( self ) -> Optional[int]:
        """Base model forward-pass test."""
        a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase_ )

    def _snake_case ( self ) -> List[str]:
        """Model-as-decoder (cross-attention) test."""
        a__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase_ )

    def _snake_case ( self ) -> Optional[Any]:
        """Masked-LM head test."""
        a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )

    def _snake_case ( self ) -> Optional[Any]:
        """Token-classification head test."""
        a__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )

    @slow
    def _snake_case ( self ) -> str:
        """Load the first released checkpoint and assert it instantiates."""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a__ : List[Any] = TFEsmModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )

    @unittest.skip("Protein models do not support embedding resizing." )
    def _snake_case ( self ) -> Dict:
        """Intentionally skipped: ESM does not support embedding resizing."""
        pass

    @unittest.skip("Protein models do not support embedding resizing." )
    def _snake_case ( self ) -> Dict:
        """Intentionally skipped: ESM does not support embedding resizing."""
        pass

    def _snake_case ( self ) -> List[Any]:
        """Embedding-access test: ESM's MLM bias is a dict of tf.Variables and
        there is no standalone output-embedding layer."""
        a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a__ : Tuple = model_class(lowerCamelCase_ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                a__ : Dict = model.get_bias()
                assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
                for k, v in name.items():
                    assert isinstance(lowerCamelCase_ , tf.Variable )
            else:
                a__ : List[Any] = model.get_output_embeddings()
                assert x is None
                a__ : str = model.get_bias()
                assert name is None
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the public ``facebook/esm2_t6_8M_UR50D``
    weights (originally ``TFEsmModelIntegrationTest``).

    NOTE(review): garbled — both tests are named ``_snake_case`` (the second
    shadows the first) and locals assigned to ``a__`` are read back under
    other names (``output``, ``expected_slice``). Code left byte-identical.
    """

    @slow
    def _snake_case ( self ) -> Optional[Any]:
        """Masked-LM head: check the output shape and a 3x3 logits slice."""
        a__ : int = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        a__ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
        a__ : Tuple = model(lowerCamelCase_ )[0]
        a__ : Any = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , lowerCamelCase_ )
        # compare the actual values for a slice.
        a__ : Optional[Any] = tf.constant(
            [
                [
                    [8.921_518, -10.589_814, -6.4_671_307],
                    [-6.3_967_156, -13.911_377, -1.1_211_915],
                    [-7.781_247, -13.951_557, -3.740_592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )

    @slow
    def _snake_case ( self ) -> List[str]:
        """Base model: check a 3x3 slice of the last hidden state."""
        a__ : Tuple = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        a__ : List[str] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        a__ : Union[str, Any] = model(lowerCamelCase_ )[0]
        # compare the actual values for a slice.
        a__ : Any = tf.constant(
            [
                [
                    [0.14_443_092, 0.54_125_327, 0.3_247_739],
                    [0.30_340_484, 0.00_526_676, 0.31_077_722],
                    [0.32_278_043, -0.24_987_096, 0.3_414_628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 718 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# NOTE(review): the four module constants below were all obfuscated to the
# same name, so each assignment shadows the previous one. Originally they were
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION,
# which the tokenizer class below still references — TODO restore the names.
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Checkpoint -> remote vocab / tokenizer file URLs.
SCREAMING_SNAKE_CASE__ : List[Any] = {
    """vocab_file""": {
        """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
    },
    """tokenizer_file""": {
        """unc-nlp/lxmert-base-uncased""": (
            """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length per checkpoint.
SCREAMING_SNAKE_CASE__ : Any = {
    """unc-nlp/lxmert-base-uncased""": 5_1_2,
}
# Per-checkpoint init overrides.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
    """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Fast (Rust-backed) LXMERT tokenizer — a WordPiece tokenizer identical in
    behaviour to BERT's fast tokenizer.

    Restores the genuine attribute and method names: the garbled source
    declared five class attributes all named ``_UpperCamelCase`` and three
    methods all named ``_snake_case`` (duplicate ``snake_case`` parameters in
    ``__init__`` were a SyntaxError), so later definitions shadowed earlier
    ones and the base-class contract was broken.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        """Load the backend tokenizer and re-sync its normalizer if the
        serialized state disagrees with the arguments passed in here."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # Rebuild the normalizer with the caller's settings.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Format a (pair of) sequence(s) as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return 0s for the first segment (incl. specials) and 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Serialize the backend WordPiece model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 629 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _A ( lowerCamelCase ):
a__ : str = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_A , _A )
def _A ( lowerCamelCase ):
a__ , a__ : Union[str, Any] = emb.weight.shape
a__ : Any = nn.Linear(_A , _A , bias=_A )
a__ : Union[str, Any] = emb.weight.data
return lin_layer
def _A ( lowerCamelCase ):
    """Load a fairseq M2M-100 checkpoint and build a HF ``MaMaaa`` model.

    NOTE(review): garbled — every result is assigned to the throwaway ``a__``
    while later lines read the original names (``mam_aaa``, ``args``,
    ``state_dict``, ``model``), ``torch.load`` is handed ``_A`` instead of the
    path parameter, and ``remove_ignore_keys_`` / ``make_linear_from_emb`` are
    the helpers above (obfuscated as ``_A``). Code left byte-identical —
    comments only.
    """
    a__ : Dict = torch.load(_A , map_location="cpu" )
    # fairseq stores the hyper-params either under "args" (old) or "cfg.model" (new).
    a__ : List[str] = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    a__ : List[str] = mam_aaa["model"]
    remove_ignore_keys_(_A )
    a__ : List[Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
    a__ : str = MaMaaaConfig(
        vocab_size=_A , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    a__ : Dict = state_dict["decoder.embed_tokens.weight"]
    a__ : List[str] = MaMaaaForConditionalGeneration(_A )
    model.model.load_state_dict(_A , strict=_A )
    # Tie the LM head to the shared embedding.
    a__ : int = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # The garbled source assigned parser/args/model to ``SCREAMING_SNAKE_CASE__``
    # while reading them back under their real names; restore those names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fixed: the source read ``args.fairseq_pathß`` (stray "ß") and called the
    # converter by its pre-obfuscation name; the converter defined above is ``_A``.
    model = _A(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 719 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (originally named ``logger``).
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
# Released MobileNetV2 checkpoint -> remote config URL
# (originally MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP).
SCREAMING_SNAKE_CASE__ : Any = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration class for MobileNetV2 models.

    Restores a usable ``__init__``: the garbled source named all fifteen
    parameters ``snake_case`` (a SyntaxError) and assigned every value to the
    throwaway ``a__`` instead of ``self``, so no attribute was ever set. The
    parameter names/defaults below follow the order the body already read.
    """

    # PretrainedConfig model identifier (was garbled to ``_UpperCamelCase``).
    model_type = """mobilenet_v2"""

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        """Validate the width multiplier and store all hyper-parameters.

        Raises:
            ValueError: if ``depth_multiplier`` is not strictly positive.
        """
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
    """ONNX export configuration for MobileNetV2.

    Restores the genuine member names: the garbled source declared all three
    properties as ``_snake_case`` (so the later ones silently shadowed the
    earlier) and the minimum-version attribute as ``_UpperCamelCase``.
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Single image input, dynamic on the batch axis."""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Logits for image classification; hidden state + pooled output otherwise."""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
| 629 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# NOTE(review): both sentinels were obfuscated to the same module-level name,
# so the second assignment shadows the first; the partitioning helpers below
# still read them via their original names (presumably ``_unmatched`` and an
# empty-dict marker) — TODO restore the names.
SCREAMING_SNAKE_CASE__ : Optional[int] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE__ : Optional[Any] = object()
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(lowerCamelCase ) - len(lowerCamelCase ) + 1 ):
a__ : Optional[Any] = [x.match(lowerCamelCase ) for x, y in zip(lowerCamelCase , ks[i:] )]
if matches and all(lowerCamelCase ):
return True
return False
def _A ( lowerCamelCase ):
def replace(lowerCamelCase , lowerCamelCase ):
for rule, replacement in rules:
if _match(lowerCamelCase , lowerCamelCase ):
return replacement
return val
return replace
def _A ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp" , lowerCamelCase )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCamelCase , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _A ( lowerCamelCase ):
    """Assign a PartitionSpec to every parameter of a (nested) param dict and
    return it as a frozen dict.

    NOTE(review): garbled — ``initd`` and ``result`` are read but never bound
    (the ``a__`` assignments were presumably those names), ``replace`` should
    be the closure returned by the rule builder, and ``_get_partition_rules``
    / ``_replacement_rules`` / ``_unmatched`` resolve to obfuscated names
    defined above. Code left byte-identical — comments only.
    """
    a__ : Dict = _get_partition_rules()
    a__ : int = _replacement_rules(lowerCamelCase )
    a__ : Dict = {k: _unmatched for k in flatten_dict(lowerCamelCase )}
    a__ : Dict = {k: replace(lowerCamelCase , lowerCamelCase ) for k, v in initd.items()}
    # Every parameter must have matched some rule.
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(lowerCamelCase ) )
| 720 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
    """Compute a minimal, ordered set of slice tuples that exactly covers the
    inclusive flat range [start, end] of a tensor of shape ``dims``.

    NOTE(review): garbled — the five parameters all reuse the name
    ``lowerCamelCase`` (a SyntaxError; originally start, end, dims,
    start_edges, end_edges) and the ``a__`` assignments shadow the names the
    body reads (``start_edges``, ``path_list``, ``divergence_idx`` …). Code
    left byte-identical — comments only.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(lowerCamelCase ) -> None:
        # Propagate "at the edge" from the innermost dimension outwards:
        # an outer dim only counts as edge-aligned if all inner dims are.
        a__ : int = True
        for i in range(len(lowerCamelCase ) ):
            a__ : Optional[Any] = -1 * (i + 1)
            l[reversed_idx] &= tally
            a__ : Tuple = l[reversed_idx]
    if start_edges is None:
        a__ : Optional[int] = [s == 0 for s in start]
        reduce_edge_list(lowerCamelCase )
    if end_edges is None:
        a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
        reduce_edge_list(lowerCamelCase )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(lowerCamelCase ) == 0:
        return [()]
    elif len(lowerCamelCase ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    a__ : List[Tuple[slice, ...]] = []
    a__ : List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(lowerCamelCase , lowerCamelCase ):
        if s == e:
            path_list.append(slice(lowerCamelCase , s + 1 ) )
        else:
            break
    a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
    a__ : Optional[Any] = len(lowerCamelCase )
    # start == end, and we're done
    if divergence_idx == len(lowerCamelCase ):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        # Everything from ``start`` down to the bottom edge of its subtree.
        assert start_edges is not None
        assert end_edges is not None
        a__ : Optional[Any] = start[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        # Everything from the top edge of the subtree down to ``end``.
        assert start_edges is not None
        assert end_edges is not None
        a__ : List[str] = end[divergence_idx]
        return tuple(
            path + (slice(lowerCamelCase , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    """Extract the flat-index range [flat_start, flat_end) from a tensor whose
    leading ``no_batch_dims`` dimensions form the batch, as one flattened chunk.

    NOTE(review): garbled — all four parameters share one name (a SyntaxError;
    originally t, flat_start, flat_end, no_batch_dims) and the body reads
    unbound names (``t``, ``flat_end``, ``slices``, ``sliced_tensors``). Code
    left byte-identical — comments only.
    """
    a__ : Optional[int] = t.shape[:no_batch_dims]
    a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
    # _get_minimal_slice_set is inclusive
    a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
    # Get an ordered list of slices to perform
    a__ : str = _get_minimal_slice_set(
        lowerCamelCase , lowerCamelCase , lowerCamelCase , )
    a__ : Any = [t[s] for s in slices]
    # Re-flatten each slice's batch dims and concatenate into one chunk.
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
    """Apply a layer over chunks of the flattened batch dimensions to bound
    peak memory, writing each chunk's output into a pre-allocated buffer.

    NOTE(review): garbled — the seven parameters all reuse ``lowerCamelCase``
    (a SyntaxError; originally layer, inputs, chunk_size, no_batch_dims,
    low_mem, _out, _add_into_out) and the ``a__`` assignments shadow the names
    the body reads (``low_mem``, ``chunk_size``, ``out`` …). Code left
    byte-identical — comments only.
    """
    if not (len(lowerCamelCase ) > 0):
        raise ValueError("Must provide at least one input" )
    # Broadcast all inputs to a common set of batch dims, then flatten them.
    a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
    a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )

    def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
        # In low-memory mode the expand is deferred (no flattening copy here).
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t

    a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
    a__ : str = None
    if _out is not None:
        a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    a__ : Optional[Any] = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    # Number of chunks = ceil(flat_batch_dim / chunk_size).
    a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(lowerCamelCase ) -> torch.Tensor:
        # Size-1 batch tensors are broadcast rather than sliced.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    a__ : str = 0
    a__ : Any = prepped_outputs
    for _ in range(lowerCamelCase ):
        # Chunk the input
        if not low_mem:
            a__ : str = _select_chunk
        else:
            a__ : Tuple = partial(
                _chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
        a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
        # Run the layer on the chunk
        a__ : Any = layer(**lowerCamelCase )
        # Allocate space for the output
        if out is None:
            a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
        # Put the chunk in its pre-allocated space
        if isinstance(lowerCamelCase , lowerCamelCase ):
            def assign(lowerCamelCase , lowerCamelCase ) -> None:
                # Recursively copy (or accumulate) dict outputs chunk by chunk.
                for k, v in da.items():
                    if isinstance(lowerCamelCase , lowerCamelCase ):
                        assign(lowerCamelCase , da[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += da[k]
                        else:
                            a__ : Dict = da[k]
            assign(lowerCamelCase , lowerCamelCase )
        elif isinstance(lowerCamelCase , lowerCamelCase ):
            for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
                if _add_into_out:
                    xa[i : i + chunk_size] += xa
                else:
                    a__ : Dict = xa
        elif isinstance(lowerCamelCase , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                a__ : Dict = output_chunk
        else:
            raise ValueError("Not supported" )
        i += chunk_size
    # Restore the original (unflattened) batch dims on the output.
    a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
    return out
class __lowerCAmelCase :
    """Auto-tuner that finds the largest chunk size a chunked layer can run
    without out-of-memory errors, caching the result per argument shape.

    NOTE(review): garbled — all three public methods are named ``_snake_case``
    (later definitions shadow earlier ones), several ``def`` lines repeat the
    parameter name ``snake_case`` (a SyntaxError), and locals assigned to
    ``a__`` are read back under other names (``candidates``, ``consistent``,
    ``arg_data`` …). Code left byte-identical — comments only.
    """

    def __init__( self , snake_case = 512 , ) -> List[str]:
        """Remember the maximum chunk size to probe up to; caches start empty."""
        a__ : int = max_chunk_size
        a__ : Optional[int] = None
        a__ : Optional[tuple] = None

    def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
        """Binary-search the largest viable power-of-two chunk size for ``fn``."""
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        # Candidates: powers of two up to max_chunk_size, above the minimum,
        # with the minimum itself prepended and the top bumped slightly.
        a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        a__ : List[str] = [c for c in candidates if c > min_chunk_size]
        a__ : Optional[int] = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(snake_case ) -> bool:
            # A RuntimeError (typically CUDA OOM) marks the size as not viable.
            try:
                with torch.no_grad():
                    fn(*snake_case , chunk_size=snake_case )
                return True
            except RuntimeError:
                return False

        a__ : Union[str, Any] = 0
        a__ : Dict = len(snake_case ) - 1
        while i > min_viable_chunk_size_index:
            a__ : Any = test_chunk_size(candidates[i] )
            if not viable:
                # Too big: bisect downwards.
                a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
            else:
                # Works: remember it and probe the upper half.
                a__ : Tuple = i
                a__ : Any = (i + len(snake_case ) - 1) // 2
        return candidates[min_viable_chunk_size_index]

    def _snake_case ( self , snake_case , snake_case ) -> bool:
        """Recursively compare two cached argument snapshots for equality."""
        a__ : str = True
        for aa, aa in zip(snake_case , snake_case ):
            assert type(snake_case ) == type(snake_case )
            if isinstance(snake_case , (list, tuple) ):
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            elif isinstance(snake_case , snake_case ):
                # Compare dicts order-insensitively by sorting on keys.
                a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
                consistent &= self._compare_arg_caches(snake_case , snake_case )
            else:
                consistent &= aa == aa
        return consistent

    def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
        """Return the tuned chunk size, re-tuning only when the (shape-reduced)
        arguments differ from the cached snapshot."""
        a__ : List[Any] = True
        # Reduce tensors to their shapes so the cache keys on structure only.
        a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(snake_case )
            a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
        else:
            # Otherwise, we can reuse the precomputed value
            a__ : Optional[int] = False
        if not consistent:
            a__ : List[str] = self._determine_favorable_chunk_size(
                snake_case , snake_case , snake_case , )
            a__ : List[str] = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 629 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it re-exports.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module logger; the config class below reports backbone fallbacks through it.
logger = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """Configuration for a UPerNet-style semantic-segmentation model.

    Stores the backbone configuration plus the decode-head and auxiliary-head
    hyper-parameters. Falls back to a default ResNet backbone when none is given.
    """

    # Kept for backward compatibility with the previous attribute name.
    _UpperCamelCase : int = """upernet"""
    # ``to_dict``-style serialization below reads ``self.__class__.model_type``.
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict into the matching backbone config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def _snake_case(self) -> dict:
        """Serialize this config (and its nested backbone config) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 629 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Shared state for the library-level logging helpers below.
_lock = threading.Lock()
# The handler this library installs on its root logger (None until configured).
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

# Whether tqdm progress bars are globally enabled.
_tqdm_active = True


def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env var."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    """Top-level package name, used as the library root logger's name."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    """Install the default stderr handler on the library root logger (once)."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Remove the library handler and reset the root logger level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with ``name``, configuring the library defaults first."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Detach the library's default handler from the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the library's default handler to the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Attach ``handler`` to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Detach ``handler`` from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit ``[LEVEL|file:line] time >> msg`` format to all handlers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Restore the default (unset) formatter on all library handlers."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique (logger, message) combination."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm that swallows all arguments and does nothing."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty no-op function for any tqdm method."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    """Factory that yields real tqdm bars or EmptyTqdm depending on the flag."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Globally enable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Globally disable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 700 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable resampling names to PIL resampling filters.
# PIL >= 9.1.0 moved the filter constants into PIL.Image.Resampling;
# older versions expose them directly on PIL.Image.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    SCREAMING_SNAKE_CASE__ : int = {
        """linear""": PIL.Image.Resampling.BILINEAR,
        """bilinear""": PIL.Image.Resampling.BILINEAR,
        """bicubic""": PIL.Image.Resampling.BICUBIC,
        """lanczos""": PIL.Image.Resampling.LANCZOS,
        """nearest""": PIL.Image.Resampling.NEAREST,
    }
else:
    SCREAMING_SNAKE_CASE__ : Dict = {
        """linear""": PIL.Image.LINEAR,
        """bilinear""": PIL.Image.BILINEAR,
        """bicubic""": PIL.Image.BICUBIC,
        """lanczos""": PIL.Image.LANCZOS,
        """nearest""": PIL.Image.NEAREST,
    }
def _A(images):
    """Convert a batch of images from a [-1, 1] torch tensor to PIL images.

    ``images`` is expected to be a (B, C, H, W) tensor — TODO confirm with callers.
    """
    # Rescale from [-1, 1] to [0, 1] and clamp out-of-range values.
    images = (images / 2 + 0.5).clamp(0, 1)
    # (B, C, H, W) -> (B, H, W, C) float numpy array on CPU.
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch (B, H, W, C), or a single (H, W, C) image,
    to a list of PIL images."""
    if images.ndim == 3:
        # Promote a single image to a batch of one.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 629 | 0 |
import torch
from torch import nn
class __lowerCAmelCase ( nn.Module ):
    """Adaptive log-softmax with optional output projections.

    NOTE(review): this block appears machine-mangled — every parameter is
    literally named ``snake_case`` (duplicate argument names are a
    ``SyntaxError`` in Python) and many call sites pass the bare placeholder
    ``snake_case``; the ``a__`` assignments discard what were presumably
    ``self.*`` attribute bindings. The structure matches Transformer-XL's
    ``ProjectedAdaptiveLogSoftmax``; real names must be restored before this
    can run. Comments below describe the apparent intent — TODO confirm.
    """

    def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case=1 , snake_case=False ) -> List[Any]:
        """simple docstring"""
        super().__init__()
        # Presumably: n_token (vocab size), d_embed, d_proj, cutoffs,
        # div_val, keep_order — TODO confirm once un-mangled.
        a__ : List[str] = n_token
        a__ : Optional[Any] = d_embed
        a__ : str = d_proj
        a__ : Optional[int] = cutoffs + [n_token]
        a__ : Union[str, Any] = [0] + self.cutoffs
        a__ : Optional[int] = div_val
        a__ : List[str] = self.cutoffs[0]
        a__ : Dict = len(self.cutoffs ) - 1
        a__ : Optional[Any] = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            # Cluster embedding/bias parameters for the tail clusters.
            a__ : str = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            a__ : List[str] = nn.Parameter(torch.zeros(self.n_clusters ) )
        a__ : Union[str, Any] = nn.ModuleList()
        a__ : List[Any] = nn.ParameterList()
        if div_val == 1:
            # Single embedding size for all clusters; one projection per cutoff.
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                else:
                    self.out_projs.append(snake_case )
                self.out_layers.append(nn.Linear(snake_case , snake_case ) )
        else:
            # Embedding size shrinks by div_val**i per tail cluster.
            for i in range(len(self.cutoffs ) ):
                a__ , a__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                a__ : Tuple = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case , snake_case ) ) )
                self.out_layers.append(nn.Linear(snake_case , r_idx - l_idx ) )
        a__ : Optional[Any] = keep_order

    def _snake_case ( self , snake_case , snake_case , snake_case , snake_case ) -> Any:
        """simple docstring"""
        # Compute logits: hidden @ weight.T (+ bias), optionally through ``proj``.
        if proj is None:
            a__ : Optional[Any] = nn.functional.linear(snake_case , snake_case , bias=snake_case )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            a__ : Optional[int] = nn.functional.linear(snake_case , proj.t().contiguous() )
            a__ : Tuple = nn.functional.linear(snake_case , snake_case , bias=snake_case )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit

    def _snake_case ( self , snake_case , snake_case=None , snake_case=False ) -> Optional[Any]:
        """simple docstring"""
        # Forward pass: returns per-token NLL when ``labels`` is given,
        # otherwise full log-probabilities over the vocabulary.
        if labels is not None:
            # Shift so that tokens < n predict n
            a__ : Optional[Any] = hidden[..., :-1, :].contiguous()
            a__ : Optional[int] = labels[..., 1:].contiguous()
            a__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
            a__ : List[Any] = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError("Input and labels should have the same size in the batch dimension." )
        else:
            a__ : Any = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            # No adaptive clusters: plain (projected) softmax over the full vocab.
            a__ : Dict = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                a__ : List[Any] = labels != -100
                a__ : str = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
                a__ : Any = (
                    -nn.functional.log_softmax(snake_case , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                a__ : int = nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            a__ , a__ : str = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    a__ , a__ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    a__ : Any = self.out_layers[0].weight[l_idx:r_idx]
                    a__ : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    a__ : str = self.out_layers[i].weight
                    a__ : List[str] = self.out_layers[i].bias
                if i == 0:
                    # Head cluster also scores the cluster tokens themselves.
                    a__ : int = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    a__ : List[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            a__ , a__ , a__ : Tuple = weights[0], biases[0], self.out_projs[0]
            a__ : Any = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            a__ : str = nn.functional.log_softmax(snake_case , dim=1 )
            if labels is None:
                a__ : Dict = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                a__ : Optional[int] = torch.zeros_like(snake_case , dtype=hidden.dtype , device=hidden.device )
            a__ : Tuple = 0
            a__ : List[str] = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                a__ , a__ : Optional[int] = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    # Select only the tokens falling into cluster i.
                    a__ : Any = (labels >= l_idx) & (labels < r_idx)
                    a__ : int = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    a__ : int = labels.index_select(0 , snake_case ) - l_idx
                    a__ : Optional[Any] = head_logprob.index_select(0 , snake_case )
                    a__ : Union[str, Any] = hidden.index_select(0 , snake_case )
                else:
                    a__ : Any = hidden
                if i == 0:
                    if labels is not None:
                        a__ : Optional[int] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        a__ : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
                else:
                    # Tail cluster: combine cluster prior with in-cluster softmax.
                    a__ , a__ , a__ : str = weights[i], biases[i], self.out_projs[i]
                    a__ : str = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    a__ : List[Any] = nn.functional.log_softmax(snake_case , dim=1 )
                    a__ : List[str] = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        a__ : Union[str, Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        a__ : List[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        a__ : List[Any] = logprob_i
                if labels is not None:
                    if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , snake_case , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out

    def _snake_case ( self , snake_case ) -> List[Any]:
        """simple docstring"""
        # Full log-probabilities over the vocabulary (no labels path).
        if self.n_clusters == 0:
            a__ : Any = self._compute_logit(snake_case , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(snake_case , dim=-1 )
        else:
            # construct weights and biases
            a__ , a__ : str = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    a__ , a__ : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    a__ : Any = self.out_layers[0].weight[l_idx:r_idx]
                    a__ : str = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    a__ : int = self.out_layers[i].weight
                    a__ : Optional[Any] = self.out_layers[i].bias
                if i == 0:
                    a__ : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    a__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(snake_case )
                biases.append(snake_case )
            a__ , a__ , a__ : List[Any] = weights[0], biases[0], self.out_projs[0]
            a__ : List[Any] = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
            a__ : List[str] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            a__ : Optional[Any] = nn.functional.log_softmax(snake_case , dim=1 )
            a__ : Dict = [0] + self.cutoffs
            for i in range(len(snake_case ) - 1 ):
                a__ , a__ : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    a__ : Any = head_logprob[:, : self.cutoffs[0]]
                else:
                    a__ , a__ , a__ : Optional[Any] = weights[i], biases[i], self.out_projs[i]
                    a__ : Tuple = self._compute_logit(snake_case , snake_case , snake_case , snake_case )
                    a__ : List[Any] = nn.functional.log_softmax(snake_case , dim=1 )
                    a__ : Union[str, Any] = head_logprob[:, -i] + tail_logprob_i
                    a__ : Dict = logprob_i
            return out
| 701 |
# Lint as: python3
import itertools
import os
import re
# Regexes for converting between CamelCase and snake_case dataset names.
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

# A single underscore (not part of a run) separates name components;
# runs of 2+ underscores are preserved verbatim when camel-casing.
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

# Valid split names: dotted words, e.g. "train", "test.closed".
_split_re = r"^\w+(\.\w+)*$"

# Characters that are invalid in Windows paths.
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a CamelCase name to snake_case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake_case name to CamelCase."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    """Return the snake_case file prefix for a dataset ``name``.

    Raises ValueError when ``name`` contains path separators.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Return the file prefix ``<name_prefix>-<split>`` after validating both parts.

    Raises ValueError when ``name`` is a path or ``split`` is not a valid split name.
    """
    if os.path.basename(name) != name:
        raise ValueError(f"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""")
    return f"""{filename_prefix_for_name(name)}-{split}"""
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    # Trailing '*' matches shard suffixes such as '-00000-of-00005'.
    return f"""{filepath}*"""
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete filenames for a dataset split, one per shard.

    With ``shard_lengths`` the names carry '-{shard:05d}-of-{total:05d}'
    suffixes; otherwise a single filename is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f""".{filetype_suffix}"""
        return [filename]
| 629 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# Root logger; the module attaches a stdout handler to it further down.
logger = logging.getLogger()
def _dump_articles(path, articles):
    """Write ``articles`` to ``path``, joined by newlines."""
    content = "\n".join(articles)
    # Context manager so the file handle is closed deterministically.
    with Path(path).open("w") as f:
        f.write(content)
# Tiny model checkpoints used by the tests below.
T5_TINY = """patrickvonplaten/t5-tiny-random"""
BART_TINY = """sshleifer/bart-tiny-random"""
MBART_TINY = """sshleifer/tiny-mbart"""

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class __lowerCAmelCase ( _UpperCamelCase ):
    """Integration tests for the seq2seq run_eval / run_eval_search scripts.

    NOTE(review): this block appears machine-mangled — methods are all named
    ``_snake_case`` (so they shadow one another), ``self.run_eval_tester`` and
    several locals (``model``, ``input_file_name``, ``score_path``, ...) are
    referenced but never bound, and ``snake_case`` is passed as a bare
    placeholder. Real names must be restored before these tests can run.
    """

    def _snake_case ( self , snake_case ) -> str:
        """simple docstring"""
        # Presumably run_eval_tester(model): run run_eval on a one-line source
        # file and check the output file is produced.
        a__ : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        a__ : Any = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        a__ : int = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(snake_case , snake_case )
        a__ : List[Any] = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
        a__ : int = "translation_en_to_de" if model == T5_TINY else "summarization"
        a__ : Optional[Any] = F"""\n        run_eval_search.py\n        {model}\n        {input_file_name}\n        {output_file_name}\n        --score_path {score_path}\n        --task {task}\n        --num_beams 2\n        --length_penalty 2.0\n        """.split()
        with patch.object(snake_case , "argv" , snake_case ):
            run_generate()
            assert Path(snake_case ).exists()
            # os.remove(Path(output_file_name))

    def _snake_case ( self ) -> str:
        """simple docstring"""
        # NOTE(review): references an undefined ``snake_case``; presumably a
        # parameterized test that forwards a model name to run_eval_tester.
        self.run_eval_tester(snake_case )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def _snake_case ( self , snake_case ) -> List[Any]:
        """simple docstring"""
        self.run_eval_tester(snake_case )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def _snake_case ( self , snake_case ) -> Dict:
        """simple docstring"""
        # Presumably test_run_eval_search(model): grid-search decode params and
        # check the expected table strings appear on stdout.
        a__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
        a__ : Dict = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        a__ : Optional[int] = {
            "en": ["Machine learning is great, isn\'t it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        a__ : Optional[Any] = Path(self.get_auto_remove_tmp_dir() )
        a__ : str = str(tmp_dir / "scores.json" )
        a__ : Optional[int] = str(tmp_dir / "val.target" )
        _dump_articles(snake_case , text["en"] )
        _dump_articles(snake_case , text["de"] )
        a__ : int = "translation_en_to_de" if model == T5_TINY else "summarization"
        a__ : int = F"""\n        run_eval_search.py\n        {model}\n        {str(snake_case )}\n        {str(snake_case )}\n        --score_path {score_path}\n        --reference_path {reference_path}\n        --task {task}\n        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
        with patch.object(snake_case , "argv" , snake_case ):
            with CaptureStdout() as cs:
                run_search()
            a__ : List[str] = [" num_beams | length_penalty", model, "Best score args"]
            a__ : List[str] = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu" )
            else:
                expected_strings.extend(snake_case )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(snake_case ).exists()
            os.remove(Path(snake_case ) )
| 702 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it re-exports.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ) -> Dict:
"""simple docstring"""
a__ : Any = 1
a__ : Dict = 3
a__ : Union[str, Any] = (32, 32)
a__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
return image
@property
def _snake_case ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
a__ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _snake_case ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
a__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(__A )
@property
def _snake_case ( self ) -> str:
"""simple docstring"""
def extract(*snake_case , **snake_case ):
class __lowerCAmelCase :
def __init__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : str = torch.ones([0] )
def _snake_case ( self , snake_case ) -> Tuple:
"""simple docstring"""
self.pixel_values.to(__A )
return self
return Out()
return extract
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.dummy_cond_unet
a__ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__A , set_alpha_to_one=__A , )
a__ : Dict = self.dummy_vae
a__ : Union[str, Any] = self.dummy_text_encoder
a__ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a__ : Dict = StableDiffusionPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
a__ : Any = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
a__ : str = "A painting of a squirrel eating a burger"
a__ : Optional[int] = torch.Generator(device=__A ).manual_seed(0 )
a__ : Optional[Any] = sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a__ : int = output.images
a__ : Dict = torch.Generator(device=__A ).manual_seed(0 )
a__ : int = sd_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__A , )[0]
a__ : Tuple = image[0, -3:, -3:, -1]
a__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ : int = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a__ : Any = self.dummy_cond_unet
a__ : List[Any] = PNDMScheduler(skip_prk_steps=__A )
a__ : List[Any] = self.dummy_vae
a__ : Tuple = self.dummy_text_encoder
a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
a__ : Dict = StableDiffusionPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
a__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
a__ : List[str] = "A painting of a squirrel eating a burger"
a__ : Union[str, Any] = torch.Generator(device=__A ).manual_seed(0 )
a__ : Optional[int] = sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
a__ : Tuple = output.images
a__ : int = torch.Generator(device=__A ).manual_seed(0 )
a__ : str = sd_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__A , )[0]
a__ : List[str] = image[0, -3:, -3:, -1]
a__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a__ : Dict = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def _snake_case ( self ) -> Optional[int]:
        """Check that a pipeline loaded with a disabled safety checker still
        generates images, and that saving/reloading such a pipeline works.

        NOTE(review): ``__A`` names below are unresolved placeholders in this
        chunk; confirm intended values (e.g. ``safety_checker=None``) against
        the full file.
        """
        a__ : Dict = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__A )
        assert isinstance(__A , __A )
        assert isinstance(pipe.scheduler , __A )
        assert pipe.safety_checker is None
        a__ : List[str] = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__A )
            a__ : Optional[int] = StableDiffusionPipeline.from_pretrained(__A )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        a__ : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
        assert image is not None
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _snake_case ( self ) -> Optional[int]:
        """Smoke-test the pipeline with all model components cast to fp16;
        only the output shape is checked.

        NOTE(review): ``__A`` arguments are unresolved placeholders in this
        chunk; confirm against the full file.
        """
        a__ : Any = self.dummy_cond_unet
        a__ : List[str] = PNDMScheduler(skip_prk_steps=__A )
        a__ : List[Any] = self.dummy_vae
        a__ : Union[str, Any] = self.dummy_text_encoder
        a__ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        # put models in fp16
        a__ : List[Any] = unet.half()
        a__ : Dict = vae.half()
        a__ : str = bert.half()
        # make sure here that pndm scheduler skips prk
        a__ : Dict = StableDiffusionPipeline(
            unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
        a__ : Tuple = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )
        a__ : List[str] = "A painting of a squirrel eating a burger"
        a__ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU tests exercising the safe-latent-diffusion arguments
    (``sld_guidance_scale``, ``sld_warmup_steps``, ...) of a Stable Diffusion
    pipeline against fixed seeds and hard-coded reference slices.

    NOTE(review): ``__A`` names below are unresolved placeholders in this
    chunk — confirm intended bindings against the full file.
    """

    def _snake_case ( self ) -> Optional[int]:
        """Free Python and CUDA memory after each test (evidently a garbled
        ``tearDown`` — it calls ``super().tearDown()``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self ) -> Any:
        """Compare outputs with safety guidance disabled (scale 0) and with a
        strong safety-guidance configuration."""
        a__ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__A )
        a__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        a__ : Dict = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )
        a__ : Optional[int] = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        a__ : Optional[int] = 4_003_660_346
        a__ : Tuple = 7
        # without safety guidance (sld_guidance_scale = 0)
        a__ : Optional[Any] = torch.manual_seed(__A )
        a__ : Optional[int] = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
        a__ : Dict = output.images
        a__ : List[Any] = image[0, -3:, -3:, -1]
        a__ : Tuple = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # without safety guidance (strong configuration)
        a__ : Any = torch.manual_seed(__A )
        a__ : Dict = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        a__ : List[str] = output.images
        a__ : Any = image[0, -3:, -3:, -1]
        a__ : str = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def _snake_case ( self ) -> Union[str, Any]:
        """Same comparison as above with a different prompt and seed."""
        a__ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__A )
        a__ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
        a__ : List[str] = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )
        a__ : Optional[Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
        a__ : Any = 2_734_971_755
        a__ : List[Any] = 7
        a__ : int = torch.manual_seed(__A )
        a__ : Any = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
        a__ : List[Any] = output.images
        a__ : Optional[int] = image[0, -3:, -3:, -1]
        a__ : Optional[Any] = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        a__ : Dict = torch.manual_seed(__A )
        a__ : str = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        a__ : Optional[int] = output.images
        a__ : Dict = image[0, -3:, -3:, -1]
        a__ : Any = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def _snake_case ( self ) -> Optional[Any]:
        """With the default (enabled) safety checker: expect an all-black image
        without safety guidance, and a non-trivial image with it."""
        a__ : str = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
        a__ : Any = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )
        a__ : Union[str, Any] = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        a__ : Any = 1_044_355_234
        a__ : List[str] = 12
        a__ : Dict = torch.manual_seed(__A )
        a__ : List[str] = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
        a__ : List[Any] = output.images
        a__ : Any = image[0, -3:, -3:, -1]
        a__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
        a__ : Tuple = torch.manual_seed(__A )
        a__ : Any = sd_pipe(
            [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        a__ : Optional[Any] = output.images
        a__ : Union[str, Any] = image[0, -3:, -3:, -1]
        a__ : Any = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 703 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply skip registering the modeling objects.
    pass
else:
    # Register the torch-backed symbols under their own key instead of
    # rebinding the whole dict (the original overwrote the import structure
    # and left `_import_structure` undefined for the _LazyModule call below).
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand;
    # the original bound the proxy to a throwaway name instead of sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 0 |
def _A ( lowerCamelCase ):
a__ : Dict = len(_lowerCamelCase )
a__ : List[Any] = len(matrix[0] )
a__ : Any = min(_lowerCamelCase , _lowerCamelCase )
for row in range(_lowerCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _lowerCamelCase ):
a__ : int = matrix[col][row] / matrix[row][row]
for i in range(_lowerCamelCase , _lowerCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
a__ : Any = True
for i in range(row + 1 , _lowerCamelCase ):
if matrix[i][row] != 0:
a__ : Union[str, Any] = matrix[i], matrix[row]
a__ : Optional[Any] = False
break
if reduce:
rank -= 1
for i in range(_lowerCamelCase ):
a__ : Any = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 704 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage (a short history of Hugging Face) used as the context
# document for the text-question-answering tool tests below.
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    """Tests for the ``text-question-answering`` tool, local and remote, with
    positional and keyword invocation.

    NOTE(review): the ``snake_case`` names passed below are unresolved in this
    chunk — presumably placeholders for the context TEXT constant / booleans;
    confirm against the full file.
    """

    def _snake_case ( self ) -> str:
        """Load the tool (and a remote variant) and run its setup hook."""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
    def _snake_case ( self ) -> Dict:
        """Positional call on the local tool."""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Tuple:
        """Positional call on the remote tool."""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> Any:
        """Keyword call on the local tool."""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
    def _snake_case ( self ) -> int:
        """Keyword call on the remote tool."""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
    """Holds the configuration used to build a DPT image processor in tests.

    The original constructor declared every parameter as ``snake_case``
    (duplicate argument names — a SyntaxError) and bound each value to a
    throwaway local instead of ``self``; both defects are fixed here, and the
    dict builder is restored to the name its caller uses
    (``prepare_image_processor_dict``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        # None-sentinels avoid shared mutable defaults while keeping the
        # documented default values.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self) -> dict:
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ,unittest.TestCase ):
    """Tests for ``DPTImageProcessor``: property existence, size overrides, and
    batched/unbatched preprocessing of PIL, NumPy, and PyTorch inputs.

    NOTE(review): ``__SCREAMING_SNAKE_CASE`` (the mixin base),
    ``DPTImageProcessingTester`` and the ``_a`` arguments are unresolved in
    this chunk — confirm against the full file.
    """

    # The class under test; None when vision deps are missing.
    _UpperCamelCase : int = DPTImageProcessor if is_vision_available() else None
    def _snake_case ( self ) -> Tuple:
        """Create the shared tester fixture."""
        a__ : Tuple = DPTImageProcessingTester(self )
    @property
    def _snake_case ( self ) -> List[Any]:
        """Kwargs for constructing the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _snake_case ( self ) -> Any:
        """The processor exposes all expected configuration attributes."""
        a__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_a , "image_mean" ) )
        self.assertTrue(hasattr(_a , "image_std" ) )
        self.assertTrue(hasattr(_a , "do_normalize" ) )
        self.assertTrue(hasattr(_a , "do_resize" ) )
        self.assertTrue(hasattr(_a , "size" ) )
    def _snake_case ( self ) -> int:
        """`from_dict` honors the default size and an explicit override."""
        a__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        a__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def _snake_case ( self ) -> List[Any]:
        """PIL inputs produce correctly shaped pixel_values (single + batch)."""
        a__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , Image.Image )
        # Test not batched input
        a__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        a__ : Union[str, Any] = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _snake_case ( self ) -> Optional[int]:
        """NumPy inputs produce correctly shaped pixel_values (single + batch)."""
        a__ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , np.ndarray )
        # Test not batched input
        a__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        a__ : Optional[Any] = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def _snake_case ( self ) -> Any:
        """Torch inputs produce correctly shaped pixel_values (single + batch)."""
        a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
        for image in image_inputs:
            self.assertIsInstance(_a , torch.Tensor )
        # Test not batched input
        a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        a__ : Union[str, Any] = image_processing(_a , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 705 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
    """Offline-mode tests: each test builds small Python programs as strings,
    runs them in a subprocess with ``TRANSFORMERS_OFFLINE=1`` and/or a mocked
    ``socket.socket`` that raises, and asserts on the subprocess result.

    NOTE(review): the ``snake_case`` names passed below are unresolved in this
    chunk — presumably placeholders for the model name / env dict / booleans;
    confirm against the full file.
    """

    @require_torch
    def _snake_case ( self ) -> str:
        """Cached model loads succeed offline when internet access raises."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """Cached loads also survive flaky (erroring) network sockets."""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """Sharded checkpoints load in offline mode."""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
    @require_torch
    def _snake_case ( self ) -> Tuple:
        """`pipeline` raises a clear error when asked to infer a task offline."""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """Models with trust_remote_code load from cache in offline mode."""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 0 |
from collections.abc import Callable
import numpy as np
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Any = int(np.ceil((x_end - xa) / step_size ) )
a__ : Any = np.zeros((n + 1,) )
a__ : Optional[int] = ya
a__ : Optional[int] = xa
for k in range(snake_case__ ):
a__ : Optional[int] = y[k] + step_size * ode_func(snake_case__ , y[k] )
a__ : Any = y[k] + (
(step_size / 2) * (ode_func(snake_case__ , y[k] ) + ode_func(x + step_size , snake_case__ ))
)
x += step_size
return y
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 706 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply skip registering the modeling objects.
    pass
else:
    # Register the torch-backed symbols under their own key instead of
    # rebinding the whole dict (the original overwrote the import structure
    # and left `_import_structure` undefined for the _LazyModule call below).
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand;
    # the original bound the proxy to a throwaway name instead of sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 629 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of canonical checkpoint name -> hosted config URL.
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}
class __lowerCAmelCase ( PretrainedConfig ):
    """Configuration for EfficientFormer-style models.

    Stores the architecture hyper-parameters (stage depths, hidden sizes,
    attention geometry, downsampling options, ...). Defaults mirror the
    original signature's default values position by position.

    NOTE(review): the original base class ``_UpperCamelCase`` was unresolved —
    ``PretrainedConfig`` (imported above) is the evident intent — and every
    parameter was declared as ``snake_case`` (duplicate argument names, a
    SyntaxError) with values bound to throwaway locals instead of ``self``.
    Parameter names were reconstructed from the attribute assignments.
    """

    model_type = "efficientformer"

    def __init__(
        self,
        depths=None,  # per-stage block counts; defaults to [3, 2, 6, 4]
        hidden_sizes=None,  # per-stage channel widths; defaults to [48, 96, 224, 448]
        downsamples=None,  # whether each stage downsamples; defaults to [True] * 4
        dim=448,
        key_dim=32,
        attention_ratio=4,
        resolution=7,
        num_hidden_layers=5,
        num_attention_heads=8,
        mlp_expansion_ratio=4,
        hidden_dropout_prob=0.0,
        patch_size=16,
        num_channels=3,
        pool_size=3,
        downsample_patch_size=3,
        downsample_stride=2,
        downsample_pad=1,
        drop_path_rate=0.0,
        num_metaad_blocks=1,
        distillation=True,
        use_layer_scale=True,
        layer_scale_init_value=1E-5,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        batch_norm_eps=1E-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # None-sentinels avoid shared mutable list defaults while preserving
        # the documented default values.
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [48, 96, 224, 448]
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths if depths is not None else [3, 2, 6, 4]
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples if downsamples is not None else [True, True, True, True]
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 707 |
from PIL import Image
def _A ( lowerCamelCase , lowerCamelCase ):
def brightness(lowerCamelCase ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(lowerCamelCase )
if __name__ == "__main__":
    # Load image
    # NOTE(review): `change_brightness` and `brigt_img` are unresolved in this
    # chunk (the function above is `_A` and the result is bound to
    # `SCREAMING_SNAKE_CASE__`) — presumably casualties of a mechanical rename;
    # confirm against the full file.
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Candidate summaries (predictions) fed to `calculate_rouge` in the tests below.
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
    "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
    " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
    " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
    "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
    " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
    " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
    " body.",
    "Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
    " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
    " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
    " punishment.",
]
# Reference summaries (targets) paired one-to-one with the predictions above.
SCREAMING_SNAKE_CASE__ : List[Any] = [
    "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
    " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
    " had informed his Lufthansa training school of an episode of severe depression, airline says .",
    "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
    " Israel and the United States opposed the move, which could open the door to war crimes investigations against"
    " Israelis .",
    "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
    " death . Organization claims that governments around the world are using the threat of terrorism to advance"
    " executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
    " sentences up by 28% .",
]
def _A ( ):
    """Aggregated vs. per-example rouge2 f-measure means should agree.

    NOTE(review): every test in this file is named ``_A``, so later
    definitions shadow earlier ones; the ``UpperCAmelCase__`` arguments and
    the ``no_aggregation*`` names are unresolved placeholders — confirm
    against the full file.
    """
    a__ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    a__ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
    )
def _A ( ):
    """rougeLsum with newline separation should score higher than without.

    NOTE(review): ``UpperCAmelCase__``/``k``/``score``/``score_no_sep`` are
    unresolved placeholders in this chunk — confirm against the full file.
    """
    a__ : Tuple = """rougeLsum"""
    a__ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    a__ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
    assert score > score_no_sep
def _A ( ):
    """Newline separation must not change rouge1/rouge2/rougeL scores.

    NOTE(review): ``UpperCAmelCase__`` arguments are unresolved placeholders
    in this chunk — confirm against the full file.
    """
    a__ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
    a__ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    a__ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
    assert score_sep == score_no_sep
def _A ( ):
    """Scores on single-sentence pairs should be identical with and without
    newline separation.

    NOTE(review): ``UpperCAmelCase__`` arguments are unresolved placeholders
    in this chunk — confirm against the full file.
    """
    a__ : Union[str, Any] = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    a__ : List[str] = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def _A ( ):
    """rougeLsum with ``<n>`` sentence markers should beat the unmarked score.

    NOTE(review): ``UpperCAmelCase__``/``new_score``/``prev_score`` are
    unresolved placeholders in this chunk — confirm against the full file.
    """
    a__ : Optional[Any] = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    a__ : List[Any] = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    a__ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
    a__ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["rougeLsum"] )["""rougeLsum"""]
    assert new_score > prev_score
def _A ( ):
    """Smoke-test ``calculate_rouge_path`` on the bundled WMT en-ro test data,
    both with and without bootstrap aggregation.

    NOTE(review): obfuscated — the ``isinstance`` checks compare against
    ``UpperCAmelCase__`` placeholders, so the expected result types (likely a
    dict for the aggregated call) cannot be recovered from this view.
    """
    a__ : Optional[int] = Path("examples/seq2seq/test_data/wmt_en_ro" )
    a__ : List[Any] = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
    a__ : Union[str, Any] = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=UpperCAmelCase__ )
    assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 708 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs keyed by model size.  The
# second-to-last path component of each URL is the SHA-256 digest of the
# file, which `_download` verifies after fetching.
# Fix: this dict was bound to an obfuscated name while the code below reads
# it as `_MODELS` (NameError at `_download(_MODELS[checkpoint_path])`).
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    """Drop bookkeeping entries from the original OpenAI state dict, in place.

    The trailing underscore marks the in-place convention.  Fixes vs. the
    obfuscated original: the function is named `remove_ignore_keys_` (the name
    the converter calls), the key list is actually bound to `ignore_keys`, and
    `pop` removes each key `k` with a `None` default so absent keys are a no-op.

    Args:
        state_dict: mutable mapping of checkpoint parameter names to tensors.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring substitutions applied to every OpenAI state-dict key to produce
# the Hugging Face parameter name (see `rename_keys`).  Ordering matters only
# in that each mapping is applied wherever its left side occurs.
# Fix: this dict was bound to an obfuscated name while `rename_keys` reads it
# as `WHISPER_MAPPING` (NameError).
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def rename_keys(s_dict):
    """Rename OpenAI Whisper state-dict keys to Hugging Face names, in place.

    Each key is run through every substring substitution in
    ``WHISPER_MAPPING``; the old entry is popped and re-inserted under the
    new key.  Fixes vs. the obfuscated original: the function is named
    `rename_keys` (the name the converter calls), and the renamed entry is
    actually stored back (`s_dict[new_key] = s_dict.pop(key)`) instead of
    being discarded into a throwaway local.

    Args:
        s_dict: mutable mapping of parameter names to tensors.

    Returns:
        The same (mutated) mapping, for call-chaining convenience.
    """
    # Snapshot the keys first: we mutate the dict while iterating.
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing its weight with ``emb``.

    Used to tie the LM head (`proj_out`) to the decoder token embedding.
    Fixes vs. the obfuscated original: results are actually bound
    (`vocab_size, emb_size`, `lin_layer`), `bias=False`, and the weight data
    is shared rather than being assigned to a throwaway local.

    Args:
        emb: an ``nn.Embedding`` whose weight is (vocab_size, emb_size).

    Returns:
        An ``nn.Linear`` whose ``weight.data`` aliases ``emb.weight.data``.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url, root=None):
    """Download ``url`` into ``root`` with a progress bar and SHA-256 check.

    The expected digest is the second-to-last path component of the URL
    (OpenAI embeds it there, see ``_MODELS``).  A cached file with a matching
    digest is reused; a mismatching cache is re-downloaded.

    Fixes vs. the obfuscated original: locals are actually bound, the hashlib
    API is ``sha256`` (``hashlib.shaaaa`` does not exist), files are closed
    via context managers, and ``root`` gains a backward-compatible default so
    the converter's one-argument call works.

    Args:
        url: checkpoint URL whose path embeds the expected SHA-256.
        root: cache directory; defaults to ``~/.cache/whisper``.

    Returns:
        The raw checkpoint bytes.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or
            if the freshly downloaded bytes fail the checksum.
    """
    if root is None:
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
    os.makedirs(root, exist_ok=True)

    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a HF ``WhisperForConditionalGeneration``.

    Fixes vs. the obfuscated original: the function carries the name the
    ``__main__`` block calls; all locals are actually bound; downloaded bytes
    are deserialized with ``torch.load`` before being subscripted; and
    ``decoder_attention_heads`` is read from ``dims["n_text_head"]`` — the
    original read ``"n_text_state"`` (the hidden size), which is wrong.

    Args:
        checkpoint_path: either a model-size key of ``_MODELS`` (downloaded)
            or a local path to a ``.pt`` checkpoint.
        pytorch_dump_folder_path: directory to ``save_pretrained`` into.

    Raises:
        ValueError: if weights other than the positional embeddings are
            missing after loading the converted state dict.
    """
    if ".pt" not in checkpoint_path:
        # Named checkpoint (e.g. "tiny"): fetch the official bytes, then load.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")

    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Kept before the rename pass so the head can be restored if not tied.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    # Encoder and decoder FFN widths are identical in OpenAI checkpoints.
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters.
    # Fixes: parse_args() is bound to `args` (the name actually used below)
    # and the "Patht" typo in the help text is corrected.
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 629 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger shared by the GIT config classes below.
# Fix: the logger was bound to an obfuscated name while the classes call
# `logger.info` / `logger.warning` (NameError at runtime).
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json URL (conventional name in
# transformers for this constant).  Nothing in the visible chunk reads it.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for GIT's CLIP-style vision encoder.

    Fixes vs. the obfuscated original: the class carries the name
    ``GitVisionConfig`` that ``GitConfig.__init__`` instantiates, it derives
    from the imported ``PretrainedConfig`` (the original base name was
    undefined), the ``(config_dict, kwargs)`` tuple from ``get_config_dict``
    is actually unpacked, and the overridden classmethod is named
    ``from_pretrained`` so it participates in the normal loading path.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1E-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision config, falling through a full GIT config if given."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the GIT (GenerativeImage2Text) model.

    Fixes vs. the obfuscated original: the class derives from the imported
    ``PretrainedConfig`` (the original base name was undefined), every
    constructor argument is actually stored on ``self`` (they were assigned to
    a throwaway local), and the serialization override is named ``to_dict``.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 709 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.  NOTE(review): obfuscated binding — nothing in the
# visible portion of this chunk references either name below; if the full
# file uses `logger`, this binding needs renaming.
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json URL.  NOTE(review):
# conventionally named INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP in transformers;
# the obfuscated name shadows the logger binding above.
SCREAMING_SNAKE_CASE__ : str = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration for the Informer probabilistic time-series transformer.

    Fixes vs. the obfuscated original: the base class is the imported
    ``PretrainedConfig`` (the original base name was undefined); the two class
    attributes are ``model_type`` and ``attribute_map`` (both were bound to
    the same obfuscated name, the second shadowing the first); every argument
    is actually stored on ``self`` (they were assigned to a throwaway local,
    breaking e.g. ``self.lags_sequence`` and ``self.cardinality`` reads); and
    the property is named ``_number_of_features`` so the ``feature_size``
    computation can call it.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # Time-series specific configuration.
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration.
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific attention settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-step input features fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 629 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.