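# Project Euler problem 43: sum of 0-9 pandigital numbers with the substring divisibility property.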
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property for a pandigital digit tuple."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers whose substrings satisfy the divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
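# Minimax: optimal game value over a perfect binary tree of leaf scores.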
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move at this node of the game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
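# HuggingFace Transformers: configuration class for the ViViT video transformer.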
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
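# HuggingFace Transformers: ImageGPT-style image processor that color-quantizes pixels into cluster ids.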
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    # Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`.
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # Map every RGB pixel to the index of its nearest color cluster.
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        # Rescale pixel values from [0, 255] to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
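# HuggingFace Transformers: lazy-import __init__ for the GroupViT model family.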
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
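# pytest fixtures that write a dummy dataset loading script to a temporary directory.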
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
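# Diffusers: fast and slow pipeline tests for the Karras VE unconditional image generator.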
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
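# Diffusers: dependency-guarded imports for the Stable Diffusion ControlNet pipelines.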
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
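# HuggingFace Transformers: lazy-import __init__ for XLM-RoBERTa-XL.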
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
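# HuggingFace Transformers CLI: the deprecated cookiecutter-based "add-new-model" command.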
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    """Factory used by the argparse sub-command to build the command object."""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory)
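# HuggingFace Transformers: XLM configuration and its ONNX export configuration.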
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16,
        dropout=0.1, attention_dropout=0.1, gelu_activation=True,
        sinusoidal_embeddings=False, causal=False, asm=False,
        n_langs=1, use_lang_emb=True, max_position_embeddings=512,
        embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02,
        bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5,
        is_encoder=True, summary_type="first", summary_use_proj=True,
        summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
        start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0,
        pad_token_id=2, bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
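# HuggingFace Transformers: fast (Rust-backed) tokenizer for HerBERT.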
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None, merges_file=None, tokenizer_file=None,
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
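# Diffusers: unit and integration tests for the AutoencoderKL variational autoencoder.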
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
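# Lucas-Lehmer primality test for Mersenne numbers of the form 2**p - 1.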
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
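# HuggingFace Transformers: lazy-import __init__ for RoCBert.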
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
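# Viterbi algorithm: most likely hidden-state path of a hidden Markov model.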
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
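    # Quick demonstration with the classic two-state "health" HMM (illustrative values only):
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))  # ['healthy', 'healthy', 'fever']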
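# HuggingFace Transformers: script converting a ParlAI Blenderbot checkpoint into the HF format.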
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI parameter name onto the corresponding HF Blenderbot name."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, rename its keys, and save an HF Blenderbot model."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
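# String utility: every variant of the input string with exactly one letter upper-cased
# (the function name below is a descriptive placeholder).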
def capitalized_variants(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character upper-cased."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
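# HuggingFace Transformers: BPE tokenizer tests for OpenAI GPT.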
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( _A , unittest.TestCase ):
_A = OpenAIGPTTokenizer
_A = OpenAIGPTTokenizerFast
_A = True
_A = False
def lowerCAmelCase_ ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ :str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file ,"w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts( self ,tokenizer ):
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
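        # Why "lower" becomes ["low", "er</w>"]: with the merge ranks written in setUp,
        # BPE first merges "l o" -> "lo", then "lo w" -> "low", and "e r</w>" -> "er</w>",
        # so ["low", "er</w>"] is the highest-priority segmentation (a reading of the
        # fixture above, not a claim from the source).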
    def test_padding( self ,max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name ,**kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError ,tokenizer_r.encode ,s ,max_length=max_length ,padding="max_length" )
                # Simple input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,s ,max_length=max_length ,padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,s2 ,max_length=max_length ,padding="max_length" ,)
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode ,p ,max_length=max_length ,padding="max_length" )
                # Pair input
                self.assertRaises(ValueError ,tokenizer_r.encode_plus ,p ,max_length=max_length ,padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError ,tokenizer_r.batch_encode_plus ,p2 ,max_length=max_length ,padding="max_length" ,)
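        # The assertRaises checks above rely on GPT-style tokenizers having no padding
        # token by default, so padding="max_length" fails until a pad token is defined.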
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _snake_case ( _A ):
pass
def solution( n : int = 10_00 ) -> int:
    '''simple docstring'''
    # Sum every natural number below n that is a multiple of 3 or of 5.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
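# Hand check (an assumed example): below 10 the qualifying numbers are 3, 5, 6 and 9,
# so solution(10) returns 23.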
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")  # constant name assumed
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        backend = find_backend(" if not is_torch_available():" )
        self.assertEqual(backend ,"torch" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        backend = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(backend ,"torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(backend ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" ,objects )
        self.assertIn("torch_and_transformers" ,objects )
        self.assertIn("flax_and_transformers" ,objects )
        self.assertIn("torch_and_transformers_and_onnx" ,objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
        dummy_constant = create_dummy_object("CONSTANT" ,"'torch'" )
        self.assertEqual(dummy_constant ,"\nCONSTANT = None\n" )
        dummy_function = create_dummy_object("function" ,"'torch'" )
        self.assertEqual(
            dummy_function ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" ,"'torch'" )
        self.assertEqual(dummy_class ,expected_dummy_class )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=2 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=20 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
        input_ids = tf.concat([input_ids, eos_tensor] ,axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        inputs_dict = prepare_blenderbot_inputs_dict(config ,input_ids ,decoder_input_ids )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self ,config ,inputs_dict ):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids ,attention_mask=attention_mask ,head_mask=head_mask ,use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] ,axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
        output_from_no_past = model(next_input_ids ,attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens ,attention_mask=next_attention_mask ,past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice ,output_from_past_slice ,rtol=1E-3 )
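        # The comparison above is the KV-cache equivalence check: running the full
        # extended sequence in one pass and running only the 3 new tokens with
        # `past_key_values` must yield numerically close logits at those positions.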
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case ( _A , _A , unittest.TestCase ):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'conversational': TFBlenderbotForConditionalGeneration,
'feature-extraction': TFBlenderbotModel,
'summarization': TFBlenderbotForConditionalGeneration,
'text2text-generation': TFBlenderbotForConditionalGeneration,
'translation': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=BlenderbotConfig )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class _snake_case ( unittest.TestCase ):
    src_text = ['My friends are cool but they eat too many carbs.']
    model_name = 'facebook/blenderbot-400M-distill'
@cached_property
    def tokenizer( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
@slow
    def test_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text ,return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids ,)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=True )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _A , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file ,"w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts( self ,tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = XLMTokenizer(self.vocab_file ,self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
        text = tokenizer.encode("sequence builders" ,add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
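        # XLM frames sequences as <s> tokens </s> (ids 0 and 1 here), so a pair
        # encodes to <s> A </s> B </s>, which is exactly what the two asserts verify.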
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" ,return_value=response_mock ) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file ,"wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
    def setUpClass( cls ):
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = BertTokenizer(vocab_file )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir ,"vocab.txt" )
            with open(vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir )
            bert_tokenizer.save_pretrained(tmp_dir )
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
        tokenizer = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
        tokenizer = AutoTokenizer.from_pretrained(
            f'{USER}/test-dynamic-tokenizer' ,use_fast=False ,trust_remote_code=True )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
        trie = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
        trie = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
        trie = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
        trie = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
        trie = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
        trie = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        trie = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
        trie = Trie()
        parts = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts ,["AB", "C"] )
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model : BertModel , ckpt_dir : str , model_name : str ):
    '''simple docstring'''
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
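    # Why the transpose list and the name map exist: PyTorch nn.Linear stores
    # weights as (out_features, in_features) while TF dense kernels are
    # (in_features, out_features), and the original TF BERT checkpoint names
    # parameters gamma/beta/kernel instead of weight/bias (a summary of the
    # mappings above, not extra behaviour).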
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name : str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F'bert/{name}'
    def create_tf_var(tensor : np.ndarray , name : str , session : tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__snake_case )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F'Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main( raw_args=None ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=__snake_case , required=__snake_case , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=__snake_case , default=__snake_case , required=__snake_case , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=__snake_case , required=__snake_case , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=__snake_case , required=__snake_case , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__snake_case , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
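# Hypothetical command line for this converter (values are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt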
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
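# Padding note for the loaders above: on TPU, dynamic tensor shapes trigger XLA
# recompilation, so batches are padded to a fixed max_length there and only to
# the longest sample everywhere else.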
def evaluation_loop( accelerator , model , eval_dataloader , metric ):
    '''simple docstring'''
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            outputs = model(**batch )
        predictions = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader ) - 1:
                predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                references = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions , references=references , )
    eval_metric = metric.compute()
return eval_metric["accuracy"]
def training_function( config , args ):
    '''simple docstring'''
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict( filename ):
    '''simple docstring'''
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
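# read_txt_into_dict expects one label per line; e.g. a file containing
#   eng
#   fra
# yields {0: "eng", 1: "fra"} (an illustrative input, not shipped with the source).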
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            F' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
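# Note on weight_g / weight_v above: these are the magnitude and direction tensors
# of weight-normalized layers (e.g. the positional convolution), copied as-is
# rather than re-fused into a single weight tensor.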
def rename_dict( key , value , full_name , weight_type , hf_dict ):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wavaveca_layer( name , value , hf_model=None , hf_dict=None ):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" , layer_index )
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name , value , hf_model )
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining" )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
from __future__ import annotations
class Node:
    def __init__( self ,data ) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree( tree : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
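# A full binary tree is one where every node has either 0 or 2 children, e.g.
#        1
#      /   \
#     2     3
#    / \
#   4   5
# is full, while giving node 3 a single child would break the property
# (an illustrative note, not from the source).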
def main() -> None: # Main function for testing.
    '''simple docstring'''
    # The exact shape of the original nine-node test tree was lost; this wiring is
    # one plausible reconstruction (an assumption, labelled as such).
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : str = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
space_files = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a ring of reusable doubly linked nodes."""
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)
    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # close the ring: the last node points back to the front
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None  # free the slot so the node can be reused
        return data
    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")
    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")
class Node:
    """A reusable queue slot holding a value and its ring links."""
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
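# Minimal usage sketch: a capacity-2 queue, showing that a dequeued node's slot
# is reused by the next enqueue instead of allocating a new node. Call
# demo_circular_queue() to exercise it.
def demo_circular_queue() -> None:
    queue = CircularQueueLinkedList(2)
    queue.enqueue("a")
    queue.enqueue("b")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"
    queue.enqueue("c")  # reuses the node freed by the dequeue above
    assert queue.dequeue() == "b"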
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
def apply_table(inp, table):
    """Permute/select bits of `inp` using the 1-based indices in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    """Circular left shift of a bit string by one position."""
    return data[1:] + data[0]
def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    """S-box lookup: outer bits of `data` pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
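# Worked example: apply_table selects bits by 1-based index, so
# apply_table("1010", [2, 4, 3, 1]) picks bits 2, 4, 3, 1 -> "0011".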
if __name__ == "__main__":
__UpperCAmelCase : Dict = input("Enter 10 bit key: ")
__UpperCAmelCase : Tuple = input("Enter 8 bit message: ")
__UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
__UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__UpperCAmelCase : Tuple = [2, 4, 3, 1]
__UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__UpperCAmelCase : int = apply_table(key, paa_table)
__UpperCAmelCase : Dict = temp[:5]
__UpperCAmelCase : Optional[int] = temp[5:]
__UpperCAmelCase : Optional[int] = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : int = apply_table(left + right, pa_table)
__UpperCAmelCase : Tuple = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : Dict = left_shift(left)
__UpperCAmelCase : Optional[Any] = left_shift(right)
__UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase : Tuple = apply_table(message, IP)
__UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
__UpperCAmelCase : int = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__UpperCAmelCase : List[Any] = apply_table(CT, IP)
__UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : int = temp[4:] + temp[:4]
__UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 57
| 1
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase_ ( __snake_case : int ) -> Any:
'''simple docstring'''
if is_torch_version("<" , "2.0.0" ) or not hasattr(__snake_case , "_dynamo" ):
return False
return isinstance(__snake_case , torch._dynamo.eval_frame.OptimizedModule )
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : bool = True ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[str] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
snake_case__ :List[str] = is_compiled_module(__snake_case )
if is_compiled:
snake_case__ :Tuple = model
snake_case__ :Union[str, Any] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__snake_case , __snake_case ):
snake_case__ :List[str] = model.module
if not keep_fpaa_wrapper:
snake_case__ :Optional[Any] = getattr(__snake_case , "forward" )
snake_case__ :Tuple = model.__dict__.pop("_original_forward" , __snake_case )
if original_forward is not None:
while hasattr(__snake_case , "__wrapped__" ):
snake_case__ :Tuple = forward.__wrapped__
if forward == original_forward:
break
snake_case__ :Dict = forward
if getattr(__snake_case , "_converted_to_transformer_engine" , __snake_case ):
convert_model(__snake_case , to_transformer_engine=__snake_case )
if is_compiled:
snake_case__ :Union[str, Any] = model
snake_case__ :str = compiled_model
return model
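# Sketch (hedged): the unwrap above peels DDP/DataParallel (and DeepSpeed, when
# available) wrappers, optionally restores the original pre-wrapped forward and
# undoes the transformer-engine conversion, then re-attaches any torch.compile
# wrapper so compiled state is preserved.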
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
PartialState().wait_for_everyone()
def lowercase_ ( __snake_case : List[str] , __snake_case : Tuple ) -> List[str]:
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__snake_case , __snake_case )
elif PartialState().local_process_index == 0:
torch.save(__snake_case , __snake_case )
@contextmanager
def lowercase_ ( **__snake_case : str ) -> List[Any]:
'''simple docstring'''
for key, value in kwargs.items():
snake_case__ :Dict = str(__snake_case )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def lowercase_ ( __snake_case : Dict ) -> List[str]:
'''simple docstring'''
if not hasattr(__snake_case , "__qualname__" ) and not hasattr(__snake_case , "__name__" ):
snake_case__ :List[Any] = getattr(__snake_case , "__class__" , __snake_case )
if hasattr(__snake_case , "__qualname__" ):
return obj.__qualname__
if hasattr(__snake_case , "__name__" ):
return obj.__name__
return str(__snake_case )
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for key, value in source.items():
if isinstance(__snake_case , __snake_case ):
snake_case__ :str = destination.setdefault(__snake_case , {} )
merge_dicts(__snake_case , __snake_case )
else:
snake_case__ :Any = value
return destination
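# Behavior sketch: nested dicts are merged recursively while scalar values from
# `source` overwrite `destination`, which is mutated in place and returned, e.g.
# merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) -> {"a": {"y": 2, "x": 1}, "b": 3}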
def lowercase_ ( __snake_case : int = None ) -> bool:
'''simple docstring'''
if port is None:
snake_case__ :Dict = 2_95_00
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
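# Note (hedged): 29500 is the usual torch.distributed rendezvous port;
# connect_ex() returning 0 means something already listens on localhost:port.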
| 57
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :Any = False
snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
snake_case__ :Tuple = TaConfig(
vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
snake_case__ :List[str] = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
snake_case__ :List[Any] = TaBlock(UpperCamelCase )
self.encoders.append(UpperCamelCase )
snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :str = self.token_embedder(UpperCamelCase )
snake_case__ :int = encoder_input_tokens.shape[1]
snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase )
snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
        # invert the attention mask (build the extended mask used by self-attention)
snake_case__ :Optional[Any] = encoder_input_tokens.size()
snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
for lyr in self.encoders:
snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
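# Shape sketch (hedged): encoder_input_tokens is (batch, seq_len) token ids; the
# forward above returns post-LayerNorm hidden states of shape
# (batch, seq_len, d_model) plus the original mask for downstream attention.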
| 57
| 1
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowercase_ ( __snake_case : Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case__ :str = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def lowercase_ ( __snake_case : Optional[int] ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ :Optional[int] = emb.weight.shape
snake_case__ :Any = nn.Linear(__snake_case , __snake_case , bias=__snake_case )
snake_case__ :Dict = emb.weight.data
return lin_layer
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Any=None ) -> List[Any]:
'''simple docstring'''
snake_case__ :Any = {}
for old_key in state_dict.keys():
snake_case__ :Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
snake_case__ :Union[str, Any] = key.replace("moe_layer.experts.0" , F'ffn.experts.expert_{expert_idx}' )
else:
snake_case__ :Dict = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
snake_case__ :Optional[int] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
snake_case__ :int = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
snake_case__ :Union[str, Any] = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
snake_case__ :List[Any] = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
snake_case__ :Optional[Any] = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
snake_case__ :Optional[int] = key.replace("final_layer_norm" , "ff_layer_norm" )
snake_case__ :Optional[int] = state_dict[old_key]
return new_dict
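# Example rename: "encoder.layers.0.moe_layer.experts.0.fc1.weight" with
# expert_idx=3 becomes "encoder.layers.0.ffn.experts.expert_3.fc1.weight".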
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : str = WEIGHTS_NAME ) -> List[Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = []
snake_case__ :Union[str, Any] = 0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
snake_case__ :Optional[Any] = switch_checkpoint_path + F'-rank-{expert}.pt'
if os.path.isfile(__snake_case ):
snake_case__ :Optional[int] = torch.load(__snake_case )["model"]
remove_ignore_keys_(__snake_case )
snake_case__ :Dict = rename_fairseq_keys(__snake_case , __snake_case )
snake_case__ :Optional[int] = os.path.join(
__snake_case , weights_name.replace(".bin" , F'-{len(__snake_case )+1:05d}-of-???.bin' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
snake_case__ :List[Any] = os.path.join(__snake_case , weights_name.replace(".bin" , F'-{len(__snake_case )+1:05d}-of-???.bin' ) )
snake_case__ :List[Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(__snake_case )
snake_case__ :Optional[Any] = rename_fairseq_keys(__snake_case , __snake_case )
snake_case__ :int = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved in the same file)
if len(__snake_case ) == 1:
snake_case__ :int = os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
snake_case__ :Any = {}
for idx, shard in enumerate(__snake_case ):
snake_case__ :Optional[Any] = weights_name.replace(".bin" , F'-{idx+1:05d}-of-{len(__snake_case ):05d}.bin' )
snake_case__ :List[Any] = os.path.join(__snake_case , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
snake_case__ :Optional[int] = shard_file
# Add the metadata
snake_case__ :Tuple = {"total_size": total_size}
snake_case__ :Optional[Any] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(__snake_case , __snake_case ) , "w" , encoding="utf-8" ) as f:
snake_case__ :List[Any] = json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + "\n"
f.write(__snake_case )
return metadata, index
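# Sketch (hedged): each expert rank file becomes one shard named like
# pytorch_model-00001-of-000NN.bin, and the returned index maps every weight
# key to its shard file, mirroring the transformers sharded-checkpoint format.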
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
__UpperCAmelCase , __UpperCAmelCase : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
__UpperCAmelCase : int = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCAmelCase : List[Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 57
|
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def topological_sort(start: str, visited: list, sort: list) -> list:
    """Depth-first topological sort of the global `edges` graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # once all neighbors are visited, add current to sort
    sort.append(current)
    # if not all vertices have been visited, pick an unvisited one and continue
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    return sort
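# For the sample graph above, topological_sort("a", [], []) returns
# ['c', 'd', 'e', 'b', 'a']: every node is appended only after its children.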
if __name__ == "__main__":
__UpperCAmelCase : Tuple = topological_sort("a", [], [])
print(sort)
| 57
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _A , _A , _A , unittest.TestCase ):
_A = StableDiffusionInstructPixaPixPipeline
_A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_A = IMAGE_TO_IMAGE_IMAGE_PARAMS
_A = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
snake_case__ :Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
snake_case__ :Union[str, Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase )
torch.manual_seed(0 )
snake_case__ :Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
snake_case__ :Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
snake_case__ :Optional[Any] = CLIPTextModel(UpperCamelCase )
snake_case__ :Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ :Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Optional[int]:
snake_case__ :Tuple = floats_tensor((1, 3, 32, 32) ,rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ :Tuple = image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case__ :Union[str, Any] = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" )
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :Dict = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :List[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Union[str, Any] = self.get_dummy_components()
snake_case__ :List[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
snake_case__ :int = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Optional[Any] = sd_pipe(**UpperCamelCase ).images
snake_case__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ :Union[str, Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Any = self.get_dummy_components()
snake_case__ :List[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
snake_case__ :Optional[Any] = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Optional[int] = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :List[str] = "french fries"
snake_case__ :int = sd_pipe(**UpperCamelCase ,negative_prompt=UpperCamelCase )
snake_case__ :Optional[int] = output.images
snake_case__ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ :Optional[int] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :List[Any] = self.get_dummy_components()
snake_case__ :Optional[int] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
snake_case__ :List[Any] = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Tuple = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :int = [inputs["prompt"]] * 2
snake_case__ :Optional[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
snake_case__ :Union[str, Any] = torch.from_numpy(UpperCamelCase ).unsqueeze(0 ).to(UpperCamelCase )
snake_case__ :Tuple = image / 2 + 0.5
snake_case__ :Tuple = image.permute(0 ,3 ,1 ,2 )
snake_case__ :Dict = image.repeat(2 ,1 ,1 ,1 )
snake_case__ :List[Any] = sd_pipe(**UpperCamelCase ).images
snake_case__ :Tuple = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
snake_case__ :str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :int = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Dict = self.get_dummy_components()
snake_case__ :Any = EulerAncestralDiscreteScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" )
snake_case__ :Tuple = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
snake_case__ :str = sd_pipe.to(UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Dict = sd_pipe(**UpperCamelCase ).images
snake_case__ :int = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join(str(x) for x in rounded_slice))
assert image.shape == (1, 32, 32, 3)
snake_case__ :str = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Optional[Any] = self.get_dummy_components()
snake_case__ :List[Any] = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase )
snake_case__ :List[Any] = VaeImageProcessor(do_resize=UpperCamelCase ,do_normalize=UpperCamelCase )
snake_case__ :Any = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Union[str, Any] = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase ,input_image_type="pt" ) )[0]
snake_case__ :Dict = components["vae"]
snake_case__ :Optional[Any] = self.get_dummy_inputs_by_type(UpperCamelCase ,input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ :List[str] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ :Dict = pipe(**UpperCamelCase )[0]
snake_case__ :str = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase ,1E-4 ,"passing latents as image input generate different result from passing image" )
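    # Sketch (hedged): the assertion above verifies that passing pre-encoded VAE
    # latents as the image input matches the PIL/tensor image path numerically.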
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ,UpperCamelCase=0 ) -> Optional[int]:
snake_case__ :List[Any] = torch.manual_seed(UpperCamelCase )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ :List[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" ,safety_checker=UpperCamelCase )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ :int = self.get_inputs()
snake_case__ :List[str] = pipe(**UpperCamelCase ).images
snake_case__ :Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ :Dict = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" ,safety_checker=UpperCamelCase )
snake_case__ :List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ :Optional[Any] = self.get_inputs()
snake_case__ :Dict = pipe(**UpperCamelCase ).images
snake_case__ :Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ :Optional[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" ,safety_checker=UpperCamelCase )
snake_case__ :Any = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ :Tuple = self.get_inputs()
snake_case__ :str = pipe(**UpperCamelCase ).images
snake_case__ :Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
snake_case__ :List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = 0
def callback_fn(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ :Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ :Any = latents[0, -3:, -3:, -1]
snake_case__ :Optional[Any] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
snake_case__ :Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
snake_case__ :List[str] = latents[0, -3:, -3:, -1]
snake_case__ :Tuple = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
snake_case__ :Any = False
snake_case__ :Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" ,safety_checker=UpperCamelCase ,torch_dtype=torch.floataa )
snake_case__ :Optional[Any] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ :Dict = self.get_inputs()
pipe(**UpperCamelCase ,callback=UpperCamelCase ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowerCAmelCase_ ( self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ :int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" ,safety_checker=UpperCamelCase ,torch_dtype=torch.floataa )
snake_case__ :int = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ :Any = self.get_inputs()
snake_case__ :int = pipe(**UpperCamelCase )
snake_case__ :Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ :Optional[int] = inputs["image"].resize((504, 504) )
snake_case__ :List[Any] = "timbrooks/instruct-pix2pix"
snake_case__ :List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase ,safety_checker=UpperCamelCase ,)
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
snake_case__ :Optional[int] = pipe(**UpperCamelCase )
snake_case__ :Any = output.images[0]
snake_case__ :Union[str, Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
snake_case__ :Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 57
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
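# Data-parallel sketch (hedged): replicate() copies the params to every device
# and shard() splits each batch across devices, matching the jit=True pmapped
# pipeline calls in the tests below.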
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 1
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
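# Usage sketch (hedged; class and method names above are obfuscated): single
# sequences are built as <s> A </s> and pairs as <s> A </s> B </s>, with
# token_type_ids of 0 for the first segment and 1 for the second.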
| 57
|
def bead_sort(sequence: list) -> list:
    """Gravity (bead) sort for non-negative integers; sorts in place and returns."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the extra beads fall one rod down
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
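# Worked trace: one outer pass over [3, 1, 2] lets the extra beads fall one rod,
# [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3]; len(sequence) passes guarantee sortedness.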
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=99 ,UpperCamelCase=[1, 1, 2] ,UpperCamelCase=1 ,UpperCamelCase=32 ,UpperCamelCase=4 ,UpperCamelCase=8 ,UpperCamelCase=37 ,UpperCamelCase="gelu_new" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=0.0 ,UpperCamelCase=512 ,UpperCamelCase=3 ,UpperCamelCase=0.02 ,UpperCamelCase=3 ,UpperCamelCase=4 ,UpperCamelCase=None ,UpperCamelCase=False ,) -> List[Any]:
snake_case__ :Optional[Any] = parent
snake_case__ :List[str] = batch_size
snake_case__ :Optional[int] = seq_length
snake_case__ :int = is_training
snake_case__ :Union[str, Any] = use_input_mask
snake_case__ :Any = use_token_type_ids
snake_case__ :int = use_labels
snake_case__ :List[Any] = vocab_size
snake_case__ :str = block_sizes
snake_case__ :Union[str, Any] = num_decoder_layers
snake_case__ :str = d_model
snake_case__ :str = n_head
snake_case__ :Union[str, Any] = d_head
snake_case__ :List[Any] = d_inner
snake_case__ :Any = hidden_act
snake_case__ :Tuple = hidden_dropout
snake_case__ :List[Any] = attention_dropout
snake_case__ :Any = activation_dropout
snake_case__ :Optional[int] = max_position_embeddings
snake_case__ :List[str] = type_vocab_size
snake_case__ :Tuple = 2
snake_case__ :Any = num_labels
snake_case__ :str = num_choices
snake_case__ :Union[str, Any] = scope
snake_case__ :str = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case__ :List[str] = n_head
# Used in the tests to check the size of the first hidden state
snake_case__ :Optional[int] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case__ :Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case__ :Any = self.num_hidden_layers + 2
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ :Tuple = None
if self.use_input_mask:
snake_case__ :Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ :Any = None
if self.use_token_type_ids:
snake_case__ :Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ :Dict = None
snake_case__ :Union[str, Any] = None
snake_case__ :Dict = None
if self.use_labels:
snake_case__ :Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ :List[str] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ :List[Any] = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> List[Any]:
snake_case__ :Any = TFFunnelModel(config=UpperCamelCase )
snake_case__ :List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :Any = model(UpperCamelCase )
snake_case__ :Optional[Any] = [input_ids, input_mask]
snake_case__ :Optional[Any] = model(UpperCamelCase )
snake_case__ :Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
snake_case__ :Dict = False
snake_case__ :Optional[Any] = TFFunnelModel(config=UpperCamelCase )
snake_case__ :str = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
snake_case__ :List[Any] = False
snake_case__ :Optional[Any] = TFFunnelModel(config=UpperCamelCase )
snake_case__ :Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Tuple:
snake_case__ :List[Any] = TFFunnelBaseModel(config=UpperCamelCase )
snake_case__ :Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :Optional[int] = model(UpperCamelCase )
snake_case__ :List[str] = [input_ids, input_mask]
snake_case__ :Optional[Any] = model(UpperCamelCase )
snake_case__ :Any = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
snake_case__ :Optional[Any] = False
snake_case__ :Optional[int] = TFFunnelBaseModel(config=UpperCamelCase )
snake_case__ :List[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
snake_case__ :Dict = False
snake_case__ :List[Any] = TFFunnelBaseModel(config=UpperCamelCase )
snake_case__ :Dict = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> List[Any]:
snake_case__ :Tuple = TFFunnelForPreTraining(config=UpperCamelCase )
snake_case__ :int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Any:
snake_case__ :List[Any] = TFFunnelForMaskedLM(config=UpperCamelCase )
snake_case__ :List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> int:
snake_case__ :Union[str, Any] = self.num_labels
snake_case__ :Tuple = TFFunnelForSequenceClassification(config=UpperCamelCase )
snake_case__ :Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :List[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> List[str]:
snake_case__ :Dict = self.num_choices
snake_case__ :List[str] = TFFunnelForMultipleChoice(config=UpperCamelCase )
snake_case__ :Tuple = tf.tile(tf.expand_dims(UpperCamelCase ,1 ) ,(1, self.num_choices, 1) )
snake_case__ :str = tf.tile(tf.expand_dims(UpperCamelCase ,1 ) ,(1, self.num_choices, 1) )
snake_case__ :Dict = tf.tile(tf.expand_dims(UpperCamelCase ,1 ) ,(1, self.num_choices, 1) )
snake_case__ :Tuple = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
snake_case__ :Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> int:
snake_case__ :Any = self.num_labels
snake_case__ :Tuple = TFFunnelForTokenClassification(config=UpperCamelCase )
snake_case__ :int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :str = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Union[str, Any]:
snake_case__ :List[str] = TFFunnelForQuestionAnswering(config=UpperCamelCase )
snake_case__ :Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ :Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _snake_case ( _A , _A , unittest.TestCase ):
_A = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_A = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[Any] = TFFunnelModelTester(self )
snake_case__ :Optional[Any] = ConfigTester(self ,config_class=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
@require_tf
class _snake_case ( _A , unittest.TestCase ):
_A = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :List[str] = TFFunnelModelTester(self ,base=UpperCamelCase )
snake_case__ :str = ConfigTester(self ,config_class=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> str:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
| 57
|
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCAmelCase : List[Any] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : Tuple = logging.getLogger()
def lowercase_ ( ) -> Dict:
'''simple docstring'''
snake_case__ :List[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
snake_case__ :Tuple = parser.parse_args()
return args.f
def lowercase_ ( __snake_case : Any , __snake_case : Any="eval" ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :Any = os.path.join(__snake_case , F'{split}_results.json' )
if os.path.exists(__snake_case ):
with open(__snake_case , "r" ) as f:
return json.load(__snake_case )
raise ValueError(F'can\'t find {path}' )
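# Sketch: each example script writes {split}_results.json into its output dir;
# get_results() loads that file so the tests below can assert on the metrics.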
__UpperCAmelCase : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _snake_case ( _A ):
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Optional[int] = self.get_auto_remove_tmp_dir()
snake_case__ :int = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_flax_glue.main()
snake_case__ :Optional[int] = get_results(UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
@slow
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case__ :int = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_clm_flax.main()
snake_case__ :Tuple = get_results(UpperCamelCase )
self.assertLess(result["eval_perplexity"] ,100 )
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Optional[Any] = self.get_auto_remove_tmp_dir()
snake_case__ :List[Any] = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_summarization_flax.main()
snake_case__ :str = get_results(UpperCamelCase ,split="test" )
self.assertGreaterEqual(result["test_rouge1"] ,10 )
self.assertGreaterEqual(result["test_rouge2"] ,2 )
self.assertGreaterEqual(result["test_rougeL"] ,7 )
self.assertGreaterEqual(result["test_rougeLsum"] ,7 )
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = self.get_auto_remove_tmp_dir()
snake_case__ :int = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_mlm_flax.main()
snake_case__ :Optional[Any] = get_results(UpperCamelCase )
self.assertLess(result["eval_perplexity"] ,42 )
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Dict = self.get_auto_remove_tmp_dir()
snake_case__ :Any = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_ta_mlm_flax.main()
snake_case__ :Union[str, Any] = get_results(UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.42 )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case__ :Any = 7 if get_gpu_count() > 1 else 2
snake_case__ :Dict = self.get_auto_remove_tmp_dir()
snake_case__ :Any = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_flax_ner.main()
snake_case__ :List[str] = get_results(UpperCamelCase )
self.assertGreaterEqual(result["eval_accuracy"] ,0.75 )
self.assertGreaterEqual(result["eval_f1"] ,0.3 )
@slow
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Any = self.get_auto_remove_tmp_dir()
snake_case__ :int = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
with patch.object(UpperCamelCase ,"argv" ,UpperCamelCase ):
run_qa.main()
snake_case__ :Optional[int] = get_results(UpperCamelCase )
self.assertGreaterEqual(result["eval_f1"] ,30 )
self.assertGreaterEqual(result["eval_exact"] ,30 )
| 57
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
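# Worked example (editor-added note, not part of the original script): with the
# default scores above, height == log2(8) == 3.0, so the leaves sit at depth 3
# and the levels alternate max / min / max:
#   depth 2 (max over leaf pairs): [90, 33, 65, 34423]
#   depth 1 (min over pairs):      [33, 65]
#   depth 0 (max):                 65
# so the script prints "Optimal value : 65".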
| 57
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase=2 ,UpperCamelCase=99 ,UpperCamelCase=0 ,UpperCamelCase=32 ,UpperCamelCase=5 ,UpperCamelCase=4 ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=512 ,UpperCamelCase=2 ,UpperCamelCase=0.02 ,UpperCamelCase=2 ,UpperCamelCase=4 ,UpperCamelCase="last" ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=0 ,) -> int:
snake_case__ :int = parent
snake_case__ :Optional[Any] = batch_size
snake_case__ :Union[str, Any] = seq_length
snake_case__ :Optional[int] = is_training
snake_case__ :Any = use_input_lengths
snake_case__ :Any = use_token_type_ids
snake_case__ :List[Any] = use_labels
snake_case__ :List[Any] = gelu_activation
snake_case__ :Union[str, Any] = sinusoidal_embeddings
snake_case__ :List[str] = causal
snake_case__ :str = asm
snake_case__ :Union[str, Any] = n_langs
snake_case__ :Union[str, Any] = vocab_size
snake_case__ :Optional[Any] = n_special
snake_case__ :List[str] = hidden_size
snake_case__ :Dict = num_hidden_layers
snake_case__ :Tuple = num_attention_heads
snake_case__ :Dict = hidden_dropout_prob
snake_case__ :List[Any] = attention_probs_dropout_prob
snake_case__ :List[Any] = max_position_embeddings
snake_case__ :Optional[Any] = type_sequence_label_size
snake_case__ :Optional[int] = initializer_range
snake_case__ :List[str] = num_labels
snake_case__ :str = num_choices
snake_case__ :Optional[Any] = summary_type
snake_case__ :List[Any] = use_proj
snake_case__ :Optional[int] = scope
snake_case__ :Any = bos_token_id
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ :str = None
if self.use_input_lengths:
snake_case__ :Dict = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case__ :Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ :Any = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
snake_case__ :Optional[int] = None
snake_case__ :Optional[Any] = None
snake_case__ :List[str] = None
if self.use_labels:
snake_case__ :List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ :Union[str, Any] = ids_tensor([self.batch_size] ,2 ).float()
snake_case__ :Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ :Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Any:
snake_case__ :Dict = XLMModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Optional[int] = model(UpperCamelCase ,lengths=UpperCamelCase ,langs=UpperCamelCase )
snake_case__ :Any = model(UpperCamelCase ,langs=UpperCamelCase )
snake_case__ :str = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> int:
snake_case__ :Dict = XLMWithLMHeadModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :List[Any] = model(UpperCamelCase ,token_type_ids=UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Tuple:
snake_case__ :Any = XLMForQuestionAnsweringSimple(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :List[Any] = model(UpperCamelCase )
snake_case__ :Any = model(UpperCamelCase ,start_positions=UpperCamelCase ,end_positions=UpperCamelCase )
snake_case__ :List[str] = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Any:
snake_case__ :List[str] = XLMForQuestionAnswering(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = model(UpperCamelCase )
snake_case__ :Union[str, Any] = model(
UpperCamelCase ,start_positions=UpperCamelCase ,end_positions=UpperCamelCase ,cls_index=UpperCamelCase ,is_impossible=UpperCamelCase ,p_mask=UpperCamelCase ,)
snake_case__ :Any = model(
UpperCamelCase ,start_positions=UpperCamelCase ,end_positions=UpperCamelCase ,cls_index=UpperCamelCase ,is_impossible=UpperCamelCase ,)
((snake_case__) , ) :str = result_with_labels.to_tuple()
snake_case__ :Any = model(UpperCamelCase ,start_positions=UpperCamelCase ,end_positions=UpperCamelCase )
((snake_case__) , ) :List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Union[str, Any]:
snake_case__ :List[str] = XLMForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :str = model(UpperCamelCase )
snake_case__ :int = model(UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> Optional[int]:
snake_case__ :Optional[int] = self.num_labels
snake_case__ :List[str] = XLMForTokenClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :int = model(UpperCamelCase ,attention_mask=UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,) -> int:
snake_case__ :str = self.num_choices
snake_case__ :Union[str, Any] = XLMForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :List[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ :List[str] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ :List[str] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
snake_case__ :Optional[int] = model(
UpperCamelCase ,attention_mask=UpperCamelCase ,token_type_ids=UpperCamelCase ,labels=UpperCamelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :List[str] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) :int = config_and_inputs
snake_case__ :Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ) -> int:
snake_case__ :Tuple = super()._prepare_for_class(UpperCamelCase ,UpperCamelCase ,return_labels=UpperCamelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case__ :Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=UpperCamelCase )
snake_case__ :List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=UpperCamelCase )
return inputs_dict
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Any = XLMModelTester(self )
snake_case__ :Union[str, Any] = ConfigTester(self ,config_class=UpperCamelCase ,emb_dim=37 )
def lowerCAmelCase_ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ,UpperCamelCase=1 ) -> Tuple:
self.assertIsInstance(UpperCamelCase ,UpperCamelCase )
self.assertListEqual(
[isinstance(UpperCamelCase ,UpperCamelCase ) for iter_attentions in attentions] ,[True] * len(UpperCamelCase ) )
self.assertEqual(len(UpperCamelCase ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(UpperCamelCase ):
# adds PAD dummy token
snake_case__ :List[str] = min_length + idx + 1
snake_case__ :Dict = min_length + idx + 1
snake_case__ :List[str] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(UpperCamelCase ) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=False ,UpperCamelCase=1 ) -> Any:
self.assertIsInstance(UpperCamelCase ,UpperCamelCase )
self.assertListEqual(
[isinstance(UpperCamelCase ,UpperCamelCase ) for iter_hidden_states in hidden_states] ,[True] * len(UpperCamelCase ) ,)
self.assertEqual(len(UpperCamelCase ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(UpperCamelCase ):
# adds PAD dummy token
snake_case__ :Dict = min_length + idx + 1
snake_case__ :str = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(UpperCamelCase ) ,)
pass
@slow
def lowerCAmelCase_ ( self ) -> str:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ :str = XLMModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def lowerCAmelCase_ ( self ) -> Any:
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
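# Editor-added sanity check (a minimal sketch, not part of the original module).
# It verifies the vectorized identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 used
# by `squared_euclidean_distance` against a naive broadcasted computation.  The
# helper name and shapes are illustrative; nothing here runs at import time.
def _sanity_check_squared_euclidean_distance(num_a: int = 4, num_b: int = 5) -> None:
    rng = np.random.default_rng(0)
    a = rng.random((num_a, 3))  # num_a RGB-like points
    b = rng.random((num_b, 3))  # num_b cluster centroids
    naive = ((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=-1)
    assert np.allclose(squared_euclidean_distance(a, b), naive)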
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
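    # Editor's note (hedged): rescaling by 1 / 127.5 and subtracting 1 maps uint8
    # pixel values from [0, 255] into [-1, 1], which appears to be the range the
    # color clusters used for quantization below were fit in.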
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
        snake_case__ :str = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
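# Usage sketch (editor-added, hedged; the toy clusters are illustrative, not real
# centroids, and the class name above was restored from context by the editor):
#
#   clusters = np.random.rand(8, 3) * 2 - 1     # 8 color centroids in [-1, 1]
#   processor = ImageGPTImageProcessor(clusters=clusters)
#   pixels = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
#   batch = processor(images=pixels, return_tensors="np")
#   batch["input_ids"].shape                    # -> (1, 256 * 256) after quantization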
| 57
| 1
|
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
def __init__( self ,UpperCamelCase ,UpperCamelCase=100 ,UpperCamelCase=13 ,UpperCamelCase=30 ,UpperCamelCase=2 ,UpperCamelCase=3 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=32 ,UpperCamelCase=5 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=10 ,UpperCamelCase=0.02 ,UpperCamelCase=3 ,) -> Optional[int]:
snake_case__ :Optional[int] = parent
snake_case__ :Any = vocab_size
snake_case__ :Any = batch_size
snake_case__ :Union[str, Any] = image_size
snake_case__ :Dict = patch_size
snake_case__ :Optional[Any] = num_channels
snake_case__ :Optional[int] = is_training
snake_case__ :Optional[Any] = use_labels
snake_case__ :Union[str, Any] = hidden_size
snake_case__ :Optional[Any] = num_hidden_layers
snake_case__ :Optional[int] = num_attention_heads
snake_case__ :str = intermediate_size
snake_case__ :int = hidden_act
snake_case__ :str = hidden_dropout_prob
snake_case__ :List[str] = attention_probs_dropout_prob
snake_case__ :Optional[Any] = type_sequence_label_size
snake_case__ :int = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ :Optional[int] = (image_size // patch_size) ** 2
snake_case__ :int = num_patches + 1
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ :Any = None
if self.use_labels:
snake_case__ :int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ :Tuple = BeitConfig(
vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values, labels
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Tuple:
snake_case__ :Union[str, Any] = FlaxBeitModel(config=UpperCamelCase )
snake_case__ :Tuple = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Tuple:
snake_case__ :Optional[Any] = FlaxBeitForMaskedImageModeling(config=UpperCamelCase )
snake_case__ :Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :Any = self.type_sequence_label_size
snake_case__ :List[Any] = FlaxBeitForImageClassification(config=UpperCamelCase )
snake_case__ :Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ :str = 1
snake_case__ :Optional[Any] = FlaxBeitForImageClassification(UpperCamelCase )
snake_case__ :List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ :Optional[Any] = model(UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Any = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) :List[Any] = config_and_inputs
snake_case__ :str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowerCAmelCase_ ( self ) -> None:
snake_case__ :List[str] = FlaxBeitModelTester(self )
snake_case__ :List[Any] = ConfigTester(self ,config_class=UpperCamelCase ,has_text_modality=UpperCamelCase ,hidden_size=37 )
def lowerCAmelCase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ , snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ :Optional[int] = model_class(UpperCamelCase )
snake_case__ :int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ :Dict = [*signature.parameters.keys()]
snake_case__ :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ , snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case__ :List[Any] = self._prepare_for_class(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = model_class(UpperCamelCase )
@jax.jit
def model_jitted(UpperCamelCase ,**UpperCamelCase ):
return model(pixel_values=UpperCamelCase ,**UpperCamelCase )
with self.subTest("JIT Enabled" ):
snake_case__ :Optional[Any] = model_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
snake_case__ :Optional[int] = model_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase ,UpperCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
snake_case__ :str = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
snake_case__ :List[str] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase )
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
snake_case__ :Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase_ ( self ) -> Any:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :int = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
snake_case__ :Any = self.default_image_processor
snake_case__ :Union[str, Any] = prepare_img()
snake_case__ :Any = image_processor(images=UpperCamelCase ,return_tensors="np" ).pixel_values
# prepare bool_masked_pos
        snake_case__ :Any = np.ones((1, 196) ,dtype=bool )
# forward pass
snake_case__ :Optional[int] = model(pixel_values=UpperCamelCase ,bool_masked_pos=UpperCamelCase )
snake_case__ :int = outputs.logits
# verify the logits
snake_case__ :Optional[Any] = (1, 196, 8_192)
self.assertEqual(logits.shape ,UpperCamelCase )
snake_case__ :Any = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,UpperCamelCase ,atol=1E-2 ) )
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Optional[int] = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
snake_case__ :Optional[int] = self.default_image_processor
snake_case__ :List[str] = prepare_img()
snake_case__ :str = image_processor(images=UpperCamelCase ,return_tensors="np" )
# forward pass
snake_case__ :Dict = model(**UpperCamelCase )
snake_case__ :Optional[Any] = outputs.logits
# verify the logits
snake_case__ :List[str] = (1, 1_000)
self.assertEqual(logits.shape ,UpperCamelCase )
snake_case__ :Dict = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCamelCase ,atol=1E-4 ) )
snake_case__ :Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() ,UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Dict = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
snake_case__ :Union[str, Any] = self.default_image_processor
snake_case__ :List[Any] = prepare_img()
snake_case__ :Dict = image_processor(images=UpperCamelCase ,return_tensors="np" )
# forward pass
snake_case__ :Any = model(**UpperCamelCase )
snake_case__ :List[Any] = outputs.logits
# verify the logits
snake_case__ :int = (1, 21_841)
self.assertEqual(logits.shape ,UpperCamelCase )
snake_case__ :List[Any] = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] ,UpperCamelCase ,atol=1E-4 ) )
snake_case__ :Dict = 2_396
self.assertEqual(logits.argmax(-1 ).item() ,UpperCamelCase )
| 57
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
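# Editor's note (hedged sketch): a test can then load the dummy builder from the
# returned directory, e.g. `datasets.load_dataset(dataset_loading_script_dir, split="train")`.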
| 57
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _snake_case ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def lowerCAmelCase_ ( self ) -> str:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
@require_torch
def lowerCAmelCase_ ( self ) -> Dict:
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] ,EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] ,EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] ,EXPECTED_OUTPUT[2] ) )
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    n, m = 1_0, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
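# Editor's note: for the 10-node sample tree above this prints 2 -- the maximum
# number of edges removable while every remaining component stays even-sized.
# dfs() marks each vertex whose subtree has even size as a cut; with an even
# total node count the root always qualifies, hence the trailing `- 1`.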
| 57
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser) -> None:
        add_new_model_parser = parser.add_parser("add-new-model")
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
else:
with open(self._testing_file ,"r" ) as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file_path, line_to_copy_below, lines_to_copy):
            # Create temp file
            fd, abs_path = mkstemp()
            line_found = False
            with fdopen(fd, "w") as new_file:
                with open(original_file_path) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file_path, abs_path)
            # Remove original file
            remove(original_file_path)
            # Move new file
            move(abs_path, original_file_path)
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
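        # Editor's note: skip_units() (above) reports whether a templated snippet
        # targets a backend (PyTorch / TensorFlow / Flax) that the user did not
        # request, so replace_in_files() below can drop that snippet.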
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split("\"")[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split("\"")[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory)
| 57
| 1
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__UpperCAmelCase : int = datasets.utils.logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = ["names", "prefix"]
__UpperCAmelCase : Dict = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
__UpperCAmelCase : Optional[int] = ["encoding_errors", "on_bad_lines"]
__UpperCAmelCase : Optional[int] = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
_A = ","
_A = None
_A = "infer"
_A = None
_A = None
_A = None
_A = None
_A = None
_A = True
_A = None
_A = None
_A = None
_A = None
_A = False
_A = None
_A = None
_A = None
_A = True
_A = True
_A = False
_A = True
_A = None
_A = "."
_A = None
_A = '"'
_A = 0
_A = None
_A = None
_A = None
_A = None
_A = True
_A = True
_A = 0
_A = True
_A = False
_A = None
_A = 10000
_A = None
_A = "strict"
_A = "error"
_A = None
    def __post_init__(self) -> None:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs(self) -> dict:
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,UpperCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _snake_case ( datasets.ArrowBasedBuilder ):
_A = CsvConfig
def lowerCAmelCase_ ( self ) -> Tuple:
return datasets.DatasetInfo(features=self.config.features )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
snake_case__ :Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase ,(str, list, tuple) ):
snake_case__ :Dict = data_files
if isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :Dict = [files]
snake_case__ :Dict = [dl_manager.iter_files(UpperCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )]
snake_case__ :List[Any] = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :int = [files]
snake_case__ :Union[str, Any] = [dl_manager.iter_files(UpperCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase ,gen_kwargs={"files": files} ) )
return splits
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> pa.Table:
if self.config.features is not None:
snake_case__ :str = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
snake_case__ :List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=UpperCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
snake_case__ :Optional[int] = table_cast(UpperCamelCase ,UpperCamelCase )
return pa_table
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Dict = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
snake_case__ :str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCamelCase ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase ) ):
snake_case__ :Optional[Any] = pd.read_csv(UpperCamelCase ,iterator=UpperCamelCase ,dtype=UpperCamelCase ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCamelCase ):
snake_case__ :Union[str, Any] = pa.Table.from_pandas(UpperCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCamelCase )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(UpperCamelCase )}: {e}' )
raise
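# A minimal usage sketch (the file name "my_data.csv" is hypothetical): the packaged
# "csv" builder above forwards most keyword arguments to pandas.read_csv through
# CsvConfig.pd_read_csv_kwargs.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("csv", data_files="my_data.csv", sep=";", skiprows=1)
#     print(dataset["train"][0])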
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
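# A brief illustrative note (token IDs below are hypothetical): HerBERT follows the
# BERT special-token layout, so for token_ids_a=[5, 6] and token_ids_b=[7] the
# methods above produce
#
#     build_inputs_with_special_tokens      -> [cls_id, 5, 6, sep_id, 7, sep_id]
#     create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]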
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
_A = XGLMConfig
_A = {}
_A = 'gelu'
def __init__( self ,UpperCamelCase ,UpperCamelCase=14 ,UpperCamelCase=7 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=99 ,UpperCamelCase=32 ,UpperCamelCase=2 ,UpperCamelCase=4 ,UpperCamelCase=37 ,UpperCamelCase="gelu" ,UpperCamelCase=0.1 ,UpperCamelCase=0.1 ,UpperCamelCase=512 ,UpperCamelCase=0.02 ,) -> Optional[int]:
snake_case__ :List[str] = parent
snake_case__ :List[Any] = batch_size
snake_case__ :Optional[Any] = seq_length
snake_case__ :Any = is_training
snake_case__ :List[str] = use_input_mask
snake_case__ :List[Any] = use_labels
snake_case__ :str = vocab_size
snake_case__ :Dict = d_model
snake_case__ :str = num_hidden_layers
snake_case__ :Optional[int] = num_attention_heads
snake_case__ :List[str] = ffn_dim
snake_case__ :Any = activation_function
snake_case__ :Union[str, Any] = activation_dropout
snake_case__ :List[str] = attention_dropout
snake_case__ :Optional[int] = max_position_embeddings
snake_case__ :List[str] = initializer_range
snake_case__ :Any = None
snake_case__ :str = 0
snake_case__ :List[Any] = 2
snake_case__ :List[Any] = 1
def lowerCAmelCase_ ( self ) -> int:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
snake_case__ :Union[str, Any] = None
if self.use_input_mask:
snake_case__ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ :Union[str, Any] = self.get_config()
snake_case__ :Tuple = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase_ ( self ) -> Dict:
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=UpperCamelCase ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=UpperCamelCase ,)
def lowerCAmelCase_ ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_A = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_A = (TFXGLMForCausalLM,) if is_tf_available() else ()
_A = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
_A = False
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Optional[Any] = TFXGLMModelTester(self )
snake_case__ :Optional[int] = ConfigTester(self ,config_class=UpperCamelCase ,n_embd=37 )
def lowerCAmelCase_ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ :List[Any] = TFXGLMModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def lowerCAmelCase_ ( self ) -> List[str]:
super().test_resize_token_embeddings()
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self ,UpperCamelCase=True ) -> str:
snake_case__ :str = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        snake_case__ :Optional[Any] = tf.convert_to_tensor([[2, 268, 9_865]] ,dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ :Any = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
snake_case__ :Dict = model.generate(UpperCamelCase ,do_sample=UpperCamelCase ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
snake_case__ :Tuple = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
snake_case__ :Optional[int] = tokenizer("Today is a nice day and" ,return_tensors="tf" )
snake_case__ :int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
snake_case__ :Tuple = model.generate(UpperCamelCase ,do_sample=UpperCamelCase ,seed=[7, 0] )
snake_case__ :str = tokenizer.decode(output_ids[0] ,skip_special_tokens=UpperCamelCase )
snake_case__ :str = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(UpperCamelCase ,UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :str = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
snake_case__ :str = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
snake_case__ :Any = "left"
# use different length sentences to test batching
snake_case__ :List[str] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
snake_case__ :Optional[int] = tokenizer(UpperCamelCase ,return_tensors="tf" ,padding=UpperCamelCase )
snake_case__ :Dict = inputs["input_ids"]
snake_case__ :Any = model.generate(input_ids=UpperCamelCase ,attention_mask=inputs["attention_mask"] ,max_new_tokens=12 )
snake_case__ :Any = tokenizer(sentences[0] ,return_tensors="tf" ).input_ids
snake_case__ :str = model.generate(input_ids=UpperCamelCase ,max_new_tokens=12 )
snake_case__ :Union[str, Any] = tokenizer(sentences[1] ,return_tensors="tf" ).input_ids
snake_case__ :str = model.generate(input_ids=UpperCamelCase ,max_new_tokens=12 )
snake_case__ :List[Any] = tokenizer.batch_decode(UpperCamelCase ,skip_special_tokens=UpperCamelCase )
snake_case__ :Optional[int] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=UpperCamelCase )
snake_case__ :List[str] = tokenizer.decode(output_padded[0] ,skip_special_tokens=UpperCamelCase )
snake_case__ :Any = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(UpperCamelCase ,UpperCamelCase )
self.assertListEqual(UpperCamelCase ,[non_padded_sentence, padded_sentence] )
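# A brief illustrative note (not part of the test): decoder-only models such as XGLM
# must be left-padded for batched generation, because decoding continues from the
# last position of every row; right padding would leave pad tokens between the
# prompt and the newly generated tokens.
#
#     tokenizer.padding_side = "left"
#     batch = tokenizer(sentences, return_tensors="tf", padding=True)
#     model.generate(input_ids=batch["input_ids"], attention_mask=batch["attention_mask"])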
def lucas_lehmer_test(p: int) -> bool:
    '''simple docstring'''
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
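# A quick illustrative check (exponents of known Mersenne primes, not part of the
# original file): 2**p - 1 is prime for p in {3, 5, 7, 13} but composite for p = 11.
#
#     [lucas_lehmer_test(p) for p in (3, 5, 7, 11, 13)]
#     # -> [True, True, True, False, True]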
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    '''simple docstring'''
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    '''simple docstring'''
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    '''simple docstring'''
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    '''simple docstring'''
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    '''simple docstring'''
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    '''simple docstring'''
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
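# A minimal worked example (a toy HMM with illustrative values only):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['Healthy', 'Healthy', 'Fever']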
from __future__ import annotations
def lowercase_ ( nums : list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowercase_ ( txt : str ) -> list:
    '''simple docstring'''
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    '''simple docstring'''
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    '''simple docstring'''
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
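# A minimal sketch (not part of the original processor) of what color quantization
# does: every RGB pixel is mapped to the index of its nearest palette cluster.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    palette = rng.random((16, 3))  # hypothetical 16-colour palette
    image = rng.random((8, 8, 3))  # hypothetical RGB image with values in [0, 1]
    token_ids = color_quantize(image, palette)  # shape (64,), values in [0, 16)
    print(token_ids.reshape(8, 8))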
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # the original `elif a % 15 == 0` branch was unreachable: multiples of 15
        # are already matched by the condition above.
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
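# A quick illustrative check (not in the original file): below 10 the multiples of
# 3 or 5 are 3, 5, 6 and 9, so
#
#     solution(10)  # -> 23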
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
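# A brief illustrative note (pattern, not new behavior): registering a _LazyModule
# in sys.modules defers the torch-dependent imports until an attribute is first
# accessed, e.g.
#
#     import transformers
#     transformers.AutoformerModel  # modeling_autoformer is imported only here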
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
def lowerCAmelCase_ ( self ) -> str:
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
def count_divisors(n: int) -> int:
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    '''simple docstring'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
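# A quick illustrative check (not in the original file): 28 = 2**2 * 7 has
# (2 + 1) * (1 + 1) = 6 divisors and is the first triangular number with more
# than five of them.
#
#     count_divisors(28)  # -> 6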
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny random GPT-2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Any = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
snake_case__ :List[str] = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Dict = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ :Optional[int] = Trie()
snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase ,["AB", "C"] )
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
_A = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = 50_257 ,UpperCamelCase = 1_024 ,UpperCamelCase = 768 ,UpperCamelCase = 12 ,UpperCamelCase = 12 ,UpperCamelCase = None ,UpperCamelCase = "gelu_new" ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 1E-5 ,UpperCamelCase = 0.02 ,UpperCamelCase = True ,UpperCamelCase = True ,UpperCamelCase = False ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'
                f' `n_embd`: {n_embd} are not equal.' )
snake_case__ :List[str] = prefix_inner_dim
snake_case__ :List[Any] = prefix_hidden_dim
snake_case__ :List[str] = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case__ :Union[str, Any] = (
nn.Linear(self.prefix_hidden_dim ,UpperCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case__ :Tuple = GPTaConfig(
vocab_size=UpperCamelCase ,n_positions=UpperCamelCase ,n_embd=UpperCamelCase ,n_layer=UpperCamelCase ,n_head=UpperCamelCase ,n_inner=UpperCamelCase ,activation_function=UpperCamelCase ,resid_pdrop=UpperCamelCase ,embd_pdrop=UpperCamelCase ,attn_pdrop=UpperCamelCase ,layer_norm_epsilon=UpperCamelCase ,initializer_range=UpperCamelCase ,scale_attn_weights=UpperCamelCase ,use_cache=UpperCamelCase ,scale_attn_by_inverse_layer_idx=UpperCamelCase ,reorder_and_upcast_attn=UpperCamelCase ,)
snake_case__ :Dict = GPTaLMHeadModel(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,) -> List[str]:
snake_case__ :str = self.transformer.transformer.wte(UpperCamelCase )
snake_case__ :Optional[int] = self.encode_prefix(UpperCamelCase )
snake_case__ :Optional[Any] = self.decode_prefix(UpperCamelCase )
snake_case__ :List[Any] = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
if labels is not None:
snake_case__ :Optional[Any] = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
snake_case__ :Any = torch.cat((dummy_token, input_ids) ,dim=1 )
snake_case__ :Dict = self.transformer(inputs_embeds=UpperCamelCase ,labels=UpperCamelCase ,attention_mask=UpperCamelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> torch.Tensor:
        return torch.zeros(UpperCamelCase ,self.prefix_length ,dtype=torch.int64 ,device=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Union[str, Any]:
return self.encode_prefix(UpperCamelCase )
@torch.no_grad()
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :Optional[Any] = torch.split(UpperCamelCase ,1 ,dim=0 )
snake_case__ :Tuple = []
snake_case__ :int = []
for feature in features:
snake_case__ :str = self.decode_prefix(feature.to(UpperCamelCase ) ) # back to the clip feature
# Only support beam search for now
snake_case__ , snake_case__ :List[Any] = self.generate_beam(
input_embeds=UpperCamelCase ,device=UpperCamelCase ,eos_token_id=UpperCamelCase )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case__ :List[str] = torch.stack(UpperCamelCase )
snake_case__ :Tuple = torch.stack(UpperCamelCase )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase = 5 ,UpperCamelCase = 67 ,UpperCamelCase = 1.0 ,UpperCamelCase = None ,) -> Any:
snake_case__ :str = eos_token_id
snake_case__ :Any = None
snake_case__ :Optional[Any] = None
snake_case__ :str = torch.ones(UpperCamelCase ,device=UpperCamelCase ,dtype=torch.int )
snake_case__ :Dict = torch.zeros(UpperCamelCase ,device=UpperCamelCase ,dtype=torch.bool )
if input_embeds is not None:
snake_case__ :List[Any] = input_embeds
else:
snake_case__ :Union[str, Any] = self.transformer.transformer.wte(UpperCamelCase )
for i in range(UpperCamelCase ):
snake_case__ :Tuple = self.transformer(inputs_embeds=UpperCamelCase )
snake_case__ :Tuple = outputs.logits
snake_case__ :int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case__ :List[Any] = logits.softmax(-1 ).log()
if scores is None:
snake_case__ , snake_case__ :str = logits.topk(UpperCamelCase ,-1 )
snake_case__ :List[str] = generated.expand(UpperCamelCase ,*generated.shape[1:] )
snake_case__ , snake_case__ :List[str] = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
snake_case__ :Union[str, Any] = next_tokens
else:
snake_case__ :List[Any] = tokens.expand(UpperCamelCase ,*tokens.shape[1:] )
snake_case__ :Optional[int] = torch.cat((tokens, next_tokens) ,dim=1 )
else:
snake_case__ :Union[str, Any] = -float(np.inf )
snake_case__ :Union[str, Any] = 0
snake_case__ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case__ :int = scores_sum / seq_lengths[:, None]
snake_case__ , snake_case__ :List[str] = scores_sum_average.view(-1 ).topk(UpperCamelCase ,-1 )
snake_case__ :Tuple = next_tokens // scores_sum.shape[1]
snake_case__ :Any = seq_lengths[next_tokens_source]
snake_case__ :Optional[int] = next_tokens % scores_sum.shape[1]
snake_case__ :Union[str, Any] = next_tokens.unsqueeze(1 )
snake_case__ :Optional[Any] = tokens[next_tokens_source]
snake_case__ :List[str] = torch.cat((tokens, next_tokens) ,dim=1 )
snake_case__ :str = generated[next_tokens_source]
snake_case__ :Tuple = scores_sum_average * seq_lengths
snake_case__ :List[str] = is_stopped[next_tokens_source]
snake_case__ :Union[str, Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
snake_case__ :List[Any] = torch.cat((generated, next_token_embed) ,dim=1 )
snake_case__ :Tuple = is_stopped + next_tokens.eq(UpperCamelCase ).squeeze()
if is_stopped.all():
break
snake_case__ :List[Any] = scores / seq_lengths
snake_case__ :Optional[int] = scores.argsort(descending=UpperCamelCase )
# tokens tensors are already padded to max_seq_length
snake_case__ :Optional[int] = [tokens[i] for i in order]
snake_case__ :List[Any] = torch.stack(UpperCamelCase ,dim=0 )
snake_case__ :Dict = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
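
# A minimal, self-contained sketch of the length-normalised beam selection used in
# `generate_beam` above (the numbers are illustrative assumptions, not model outputs):
# cumulative log-probabilities are divided by sequence lengths before `topk`, so longer
# beams are not penalised simply for accumulating more negative log-probability.
if __name__ == "__main__":
    beam_scores = torch.tensor([-2.0, -4.5, -6.0])  # cumulative log-probs of 3 beams
    beam_lengths = torch.tensor([2.0, 3.0, 5.0])  # tokens generated so far per beam
    normalized = beam_scores / beam_lengths  # per-token average log-prob
    print(normalized.topk(2).indices)  # tensor([0, 2]): beams 0 (-1.0) and 2 (-1.2) survive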
| 57
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once rather than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
    # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
    # prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
    # We also need to keep track of the starting epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
    snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of a training script with checkpointing and resumption." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
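
# A quick check of the scheduler sizing above (illustrative numbers, not from the source):
# with a train dataloader of 1_150 batches, num_epochs = 2 and
# gradient_accumulation_steps = 2, `max_training_steps` is (1_150 * 2) // 2 = 1_150,
# matching the one `optimizer.step()` taken per accumulation window in the training loop.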
| 57
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCAmelCase : int = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
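
# Illustrative note (not from the source): `_LazyModule` defers the heavy `modeling_mega`
# import until one of the names listed in `_import_structure` is first accessed, so merely
# importing the package stays cheap; the `TYPE_CHECKING` branch gives static type checkers
# the real imports without paying that cost at runtime.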
| 57
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_ ( __snake_case : Node | None ) -> None: # in-order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase_ ( __snake_case : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase_ ( __snake_case : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
snake_case__ :Dict = Node(1 )
snake_case__ :int = Node(2 )
snake_case__ :Optional[Any] = Node(3 )
snake_case__ :Tuple = Node(4 )
snake_case__ :str = Node(5 )
snake_case__ :Optional[Any] = Node(6 )
snake_case__ :List[Any] = Node(7 )
snake_case__ :List[str] = Node(8 )
snake_case__ :Tuple = Node(9 )
print(is_full_binary_tree(__snake_case ) )
print(depth_of_tree(__snake_case ) )
print("Tree is: " )
display(__snake_case )
if __name__ == "__main__":
main()
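
# Illustrative counter-example (the names follow the annotations and usage above; not
# executable as-is because the constructor's attribute assignments are obfuscated): a node
# with exactly one child is not a full binary tree, since `is_full_binary_tree` requires
# each node to have either zero or two children:
#     lone = Node(1)
#     lone.left = Node(2)  # right child missing
#     is_full_binary_tree(lone)  # -> False; depth_of_tree(lone) -> 2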
| 57
| 1
|
from math import factorial
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
snake_case__ :Any = real
if isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :Union[str, Any] = [1] * rank
else:
snake_case__ :Tuple = rank
def __repr__( self ) -> List[str]:
return (
f'{self.real}+'
f'{"+".join(str(UpperCamelCase )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :str = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real ,UpperCamelCase )
def __add__( self ,UpperCamelCase ) -> Dict:
if not isinstance(UpperCamelCase ,UpperCamelCase ):
return Dual(self.real + other ,self.duals )
snake_case__ :Union[str, Any] = self.duals.copy()
snake_case__ :Dict = other.duals.copy()
if len(UpperCamelCase ) > len(UpperCamelCase ):
o_dual.extend([1] * (len(UpperCamelCase ) - len(UpperCamelCase )) )
elif len(UpperCamelCase ) < len(UpperCamelCase ):
s_dual.extend([1] * (len(UpperCamelCase ) - len(UpperCamelCase )) )
snake_case__ :Dict = []
for i in range(len(UpperCamelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real ,UpperCamelCase )
_A = __add__
def __sub__( self ,UpperCamelCase ) -> List[Any]:
return self + other * -1
def __mul__( self ,UpperCamelCase ) -> Any:
if not isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :Any = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other ,UpperCamelCase )
snake_case__ :List[str] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real ,UpperCamelCase )
_A = __mul__
def __truediv__( self ,UpperCamelCase ) -> Optional[int]:
if not isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :Tuple = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other ,UpperCamelCase )
raise ValueError
def __floordiv__( self ,UpperCamelCase ) -> int:
if not isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :int = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other ,UpperCamelCase )
raise ValueError
def __pow__( self ,UpperCamelCase ) -> Dict:
if n < 0 or isinstance(UpperCamelCase ,UpperCamelCase ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
snake_case__ :Tuple = self
for _ in range(n - 1 ):
x *= self
return x
def lowercase_ ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if not callable(__snake_case ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__snake_case , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__snake_case , __snake_case ):
raise ValueError("differentiate() requires an int as input for order" )
snake_case__ :str = Dual(__snake_case , 1 )
snake_case__ :Optional[Any] = func(__snake_case )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowercase_ ( __snake_case : float ) -> float:
    '''simple docstring'''
    return __snake_case**2 * __snake_case**4
print(differentiate(lowercase_, 9, 2))
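
# A quick hand check of the value printed above (illustrative, not from the source):
# f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and f''(9) = 30 * 9**4 = 196830.
assert 30 * 9**4 == 196830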
| 57
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCAmelCase : int = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
__UpperCAmelCase : Any = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
__UpperCAmelCase : str = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
__UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
__UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 57
| 1
|
from random import randint, random
def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : bool = False , __snake_case : bool = False , __snake_case : int = 5 , ) -> list:
'''simple docstring'''
snake_case__ :int = [[-1] * number_of_cells] # Create a highway without any car
snake_case__ :List[str] = 0
snake_case__ :Any = max(__snake_case , 0 )
while i < number_of_cells:
snake_case__ :Optional[int] = (
randint(0 , __snake_case ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def lowercase_ ( __snake_case : list , __snake_case : int ) -> int:
'''simple docstring'''
snake_case__ :Optional[Any] = 0
snake_case__ :Union[str, Any] = highway_now[car_index + 1 :]
for cell in range(len(__snake_case ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # Here the car is near the end of the highway, so wrap around to the start
return distance + get_distance(__snake_case , -1 )
def lowercase_ ( __snake_case : list , __snake_case : float , __snake_case : int ) -> list:
'''simple docstring'''
snake_case__ :List[str] = len(__snake_case )
    # Before the calculations, the highway is empty
snake_case__ :List[Any] = [-1] * number_of_cells
for car_index in range(__snake_case ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
snake_case__ :Union[str, Any] = min(highway_now[car_index] + 1 , __snake_case )
            # Number of empty cells before the next car
snake_case__ :Any = get_distance(__snake_case , __snake_case ) - 1
# We can't have the car causing an accident
snake_case__ :Tuple = min(next_highway[car_index] , __snake_case )
if random() < probability:
# Randomly, a driver will slow down
snake_case__ :Any = max(next_highway[car_index] - 1 , 0 )
return next_highway
def lowercase_ ( __snake_case : list , __snake_case : int , __snake_case : float , __snake_case : int ) -> list:
'''simple docstring'''
snake_case__ :Optional[int] = len(highway[0] )
for i in range(__snake_case ):
snake_case__ :List[str] = update(highway[i] , __snake_case , __snake_case )
snake_case__ :int = [-1] * number_of_cells
for car_index in range(__snake_case ):
snake_case__ :List[str] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
snake_case__ :Dict = (car_index + speed) % number_of_cells
# Commit the change of position
snake_case__ :int = speed
highway.append(__snake_case )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
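
    # A self-contained sketch of one Nagel-Schreckenberg update step (illustrative; it
    # mirrors the accelerate / brake-to-gap / random-slowdown / move logic of the update
    # and movement passes above, with assumed demo values):
    def nasch_step(highway_state: list, max_speed: int, probability: float) -> list:
        number_of_cells = len(highway_state)
        moved = [-1] * number_of_cells
        for i, speed in enumerate(highway_state):
            if speed == -1:  # empty cell
                continue
            speed = min(speed + 1, max_speed)  # accelerate
            # number of free cells before the next car on the circular highway
            gap = next(
                (d for d in range(1, number_of_cells) if highway_state[(i + d) % number_of_cells] != -1),
                number_of_cells,
            ) - 1
            speed = min(speed, gap)  # brake so the car cannot cause an accident
            if random() < probability:
                speed = max(speed - 1, 0)  # random slowdown
            moved[(i + speed) % number_of_cells] = speed  # move the car
        return moved

    print(nasch_step([2, -1, -1, 1, -1, -1], max_speed=5, probability=0.0))  # [-1, -1, 2, -1, -1, 2]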
| 57
|
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ :Dict = ""
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( __snake_case : List[str] ) -> int:
'''simple docstring'''
return data[1:] + data[0]
def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = ""
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :int = int("0b" + data[0] + data[-1] , 2 )
snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case__ :Tuple = message[:4]
snake_case__ :int = message[4:]
snake_case__ :int = apply_table(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case )
snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] )
snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741
snake_case__ :int = "0" * (2 - len(__snake_case )) + r
snake_case__ :Optional[Any] = apply_table(l + r , __snake_case )
snake_case__ :Tuple = xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
__UpperCAmelCase : Dict = input("Enter 10 bit key: ")
__UpperCAmelCase : Tuple = input("Enter 8 bit message: ")
__UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
__UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__UpperCAmelCase : Tuple = [2, 4, 3, 1]
__UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__UpperCAmelCase : int = apply_table(key, paa_table)
__UpperCAmelCase : Dict = temp[:5]
__UpperCAmelCase : Optional[int] = temp[5:]
__UpperCAmelCase : Optional[int] = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : int = apply_table(left + right, pa_table)
__UpperCAmelCase : Tuple = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : Dict = left_shift(left)
__UpperCAmelCase : Optional[Any] = left_shift(right)
__UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase : Tuple = apply_table(message, IP)
__UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
__UpperCAmelCase : int = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__UpperCAmelCase : List[Any] = apply_table(CT, IP)
__UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : int = temp[4:] + temp[:4]
__UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 57
| 1
|
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__UpperCAmelCase : Optional[int] = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
__UpperCAmelCase : List[Any] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
__UpperCAmelCase : int = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def lowerCAmelCase_ ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) ,homepage="https://github.com/hendrycks/math" ,codebase_urls=["https://github.com/hendrycks/math"] ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[Any]:
snake_case__ :Dict = 0.0
for i, j in zip(UpperCamelCase ,UpperCamelCase ):
n_correct += 1.0 if math_equivalence.is_equiv(UpperCamelCase ,UpperCamelCase ) else 0.0
snake_case__ :Dict = n_correct / len(UpperCamelCase )
return {
"accuracy": accuracy,
}
| 57
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :Any = False
snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
snake_case__ :Tuple = TaConfig(
vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
snake_case__ :List[str] = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
snake_case__ :List[Any] = TaBlock(UpperCamelCase )
self.encoders.append(UpperCamelCase )
snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :str = self.token_embedder(UpperCamelCase )
snake_case__ :int = encoder_input_tokens.shape[1]
snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase )
snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
        # invert the attention mask
snake_case__ :Optional[Any] = encoder_input_tokens.size()
snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
for lyr in self.encoders:
snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
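
# Illustrative shape flow (an assumption about typical usage, not from the source): token
# ids of shape (batch, seq_len) are embedded to (batch, seq_len, d_model), position
# encodings of the same width are added, every TaBlock preserves that shape, and the final
# layer norm plus dropout return (batch, seq_len, d_model) together with the original
# encoder input mask.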
| 57
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Any = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[Any] = start
# add current to visited
visited.append(__snake_case )
snake_case__ :List[str] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
    # once all neighbors are visited, add current to the sort
    sort.append(__snake_case )
    # if not all vertices have been visited, select an unvisited one to visit
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
if __name__ == "__main__":
__UpperCAmelCase : Tuple = topological_sort("a", [], [])
print(sort)
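
    # Illustrative note (not from the source): the DFS above appends each vertex only after
    # all of its neighbours, so the result is the *reverse* of a topological order. For the
    # sample graph it is ['c', 'd', 'e', 'b', 'a']; reversing gives ['a', 'b', 'e', 'd', 'c'],
    # in which every edge points forward.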
| 57
| 1
|
def lowercase_ ( __snake_case : int ) -> list:
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
snake_case__ :Dict = gray_code_sequence_string(__snake_case )
    # convert them to integers
for i in range(len(__snake_case ) ):
snake_case__ :Optional[int] = int(sequence[i] , 2 )
return sequence
def lowercase_ ( __snake_case : int ) -> list:
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
snake_case__ :Optional[int] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
snake_case__ :Tuple = gray_code_sequence_string(bit_count - 1 )
snake_case__ :int = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
snake_case__ :List[Any] = "0" + smaller_sequence[i]
sequence.append(__snake_case )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
snake_case__ :str = "1" + smaller_sequence[i]
sequence.append(__snake_case )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
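
    # A quick worked example (illustrative, not from the source): for bit_count = 2 the
    # string sequence built above is ["00", "01", "11", "10"], which converts to the
    # Gray-code integers [0, 1, 3, 2]; consecutive values differ in exactly one bit.
    assert [int(b, 2) for b in ["00", "01", "11", "10"]] == [0, 1, 3, 2]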
| 57
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
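
# Illustrative note (not from the source): the data flow used in both tests is the standard
# pmap pattern: `replicate` copies the pipeline params to every device, `shard` splits the
# prompt and image batches along their leading axis, and `jax.random.split` hands each
# device its own PRNG key, so `jit=True` runs one sample per device in parallel.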
| 57
| 1
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__UpperCAmelCase : Dict = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
__UpperCAmelCase : Optional[int] = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__UpperCAmelCase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Dict = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
__UpperCAmelCase : str = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Tuple = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
__UpperCAmelCase : Optional[Any] = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : List[Any] = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
__UpperCAmelCase : List[str] = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : List[str] = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
__UpperCAmelCase : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Optional[Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
__UpperCAmelCase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
__UpperCAmelCase : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
__UpperCAmelCase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Any = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
__UpperCAmelCase : int = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
__UpperCAmelCase : List[str] = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
__UpperCAmelCase : Optional[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : int = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
__UpperCAmelCase : List[Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
__UpperCAmelCase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
__UpperCAmelCase : int = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : Optional[int] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
__UpperCAmelCase : List[str] = ""
__UpperCAmelCase : str = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
__UpperCAmelCase : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
__UpperCAmelCase : str = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowercase_ ( __snake_case : List[Any] , __snake_case : Dict ) -> Tuple:
'''simple docstring'''
assert ReadMe.from_string(__snake_case , __snake_case ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Union[str, Any]:
'''simple docstring'''
with pytest.raises(__snake_case , match=re.escape(expected_error.format(path="root" ) ) ):
snake_case__ :Dict = ReadMe.from_string(__snake_case , __snake_case )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowercase_ ( __snake_case : List[str] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
with pytest.raises(__snake_case , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(__snake_case , __snake_case )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowercase_ ( __snake_case : Optional[Any] ) -> Dict:
'''simple docstring'''
ReadMe.from_string(__snake_case , __snake_case , suppress_parsing_errors=__snake_case )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowercase_ ( __snake_case : Any , __snake_case : int ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = Path(__snake_case ) / "README.md"
with open(__snake_case , "w+" ) as readme_file:
readme_file.write(__snake_case )
snake_case__ :Optional[int] = ReadMe.from_readme(__snake_case , __snake_case ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowercase_ ( __snake_case : List[Any] , __snake_case : Optional[Any] ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :Dict = Path(__snake_case ) / "README.md"
with open(__snake_case , "w+" ) as readme_file:
readme_file.write(__snake_case )
snake_case__ :Tuple = expected_error.format(path=__snake_case )
with pytest.raises(__snake_case , match=re.escape(__snake_case ) ):
snake_case__ :List[str] = ReadMe.from_readme(__snake_case , __snake_case )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowercase_ ( __snake_case : Any , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = Path(__snake_case ) / "README.md"
with open(__snake_case , "w+" ) as readme_file:
readme_file.write(__snake_case )
snake_case__ :Tuple = expected_error.format(path=__snake_case )
with pytest.raises(__snake_case , match=re.escape(__snake_case ) ):
ReadMe.from_readme(__snake_case , __snake_case )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowercase_ ( __snake_case : Any ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :Optional[Any] = Path(__snake_case ) / "README.md"
with open(__snake_case , "w+" ) as readme_file:
readme_file.write(__snake_case )
ReadMe.from_readme(__snake_case , __snake_case , suppress_parsing_errors=__snake_case )
| 57
|
def lowercase_ ( __snake_case : list ) -> list:
'''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(__snake_case ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
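
    # A self-contained trace of the gravity pass above (illustrative): whenever an upper
    # rod holds more beads than the rod below it, the surplus falls down, so the list is
    # sorted ascending after at most len(sequence) passes.
    demo = [3, 1, 2]
    for _ in range(len(demo)):
        for i in range(len(demo) - 1):
            if demo[i] > demo[i + 1]:
                surplus = demo[i] - demo[i + 1]
                demo[i] -= surplus
                demo[i + 1] += surplus
    assert demo == [1, 2, 3]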
| 57
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self ,UpperCamelCase ,UpperCamelCase=7 ,UpperCamelCase=3 ,UpperCamelCase=18 ,UpperCamelCase=30 ,UpperCamelCase=400 ,UpperCamelCase=True ,UpperCamelCase=None ,UpperCamelCase=True ,) -> List[str]:
snake_case__ :Optional[Any] = size if size is not None else {"height": 18, "width": 18}
snake_case__ :Dict = parent
snake_case__ :str = batch_size
snake_case__ :int = num_channels
snake_case__ :Optional[Any] = image_size
snake_case__ :str = min_resolution
snake_case__ :Optional[Any] = max_resolution
snake_case__ :List[str] = do_resize
snake_case__ :Dict = size
snake_case__ :List[str] = apply_ocr
def lowerCAmelCase_ ( self ) -> Dict:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( _A , unittest.TestCase ):
_A = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Dict = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase ,"do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase ,"size" ) )
self.assertTrue(hasattr(UpperCamelCase ,"apply_ocr" ) )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 18, "width": 18} )
snake_case__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
def lowerCAmelCase_ ( self ) -> int:
pass
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ :Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,Image.Image )
# Test not batched input
snake_case__ :Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
self.assertIsInstance(encoding.words ,UpperCamelCase )
self.assertIsInstance(encoding.boxes ,UpperCamelCase )
# Test batched
snake_case__ :Union[str, Any] = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
snake_case__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ :Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,np.ndarray )
# Test not batched input
snake_case__ :Union[str, Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
# Test batched
snake_case__ :Dict = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case__ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ :List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase ,torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase ,torch.Tensor )
# Test not batched input
snake_case__ :Optional[Any] = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
# Test batched
snake_case__ :int = image_processing(UpperCamelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) ,)
def lowerCAmelCase_ ( self ) -> List[str]:
        # with apply_ocr = True
snake_case__ :str = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case__ :Tuple = load_dataset("hf-internal-testing/fixtures_docvqa" ,split="test" )
snake_case__ :str = Image.open(ds[0]["file"] ).convert("RGB" )
snake_case__ :List[Any] = image_processing(UpperCamelCase ,return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case__ :str = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case__ :Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,UpperCamelCase )
self.assertListEqual(encoding.boxes ,UpperCamelCase )
        # with apply_ocr = False
snake_case__ :Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase )
snake_case__ :Union[str, Any] = image_processing(UpperCamelCase ,return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
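# Hedged usage sketch (added for illustration; not part of the original tests):
# how the processor exercised above is typically called directly, assuming the
# imports at the top of this file. "document.png" is a hypothetical path, and
# apply_ocr=True additionally requires pytesseract to be installed.
def _layoutlm_processor_usage_example():
    image_processing = LayoutLMvaImageProcessor()  # apply_ocr defaults to True
    image = Image.open("document.png").convert("RGB")
    encoding = image_processing(image, return_tensors="pt")
    # pixel_values are resized to 224x224; words/boxes come from Tesseract OCR
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(encoding.words[0][:5], encoding.boxes[0][:5])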
| 57
|
from __future__ import annotations
def lowercase_ ( __snake_case : list ) -> float:
'''simple docstring'''
    if not __snake_case:
        raise ValueError("List is empty" )
return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
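    # Worked example (added): the mean of [1, 2, 3, 4] is (1 + 2 + 3 + 4) / 4 = 2.5.
    print(lowercase_([1, 2, 3, 4]))  # 2.5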
| 57
| 1
|
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def lowercase_ ( *__snake_case : int ) -> Union[str, Any]:
'''simple docstring'''
    if not isinstance(__snake_case , list ):
snake_case__ :List[str] = list(__snake_case )
for i in range(len(__snake_case ) ):
snake_case__ :int = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def lowercase_ ( __snake_case : Exception ) -> bool:
'''simple docstring'''
snake_case__ :List[str] = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
    if isinstance(__snake_case , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def lowercase_ ( __snake_case : callable = None , __snake_case : int = 1_28 ) -> str:
'''simple docstring'''
if function is None:
return functools.partial(__snake_case , starting_batch_size=__snake_case )
snake_case__ :Tuple = starting_batch_size
def decorator(*__snake_case : List[Any] , **__snake_case : str ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
snake_case__ :List[str] = list(inspect.signature(__snake_case ).parameters.keys() )
# Guard against user error
        if len(params ) < (len(args ) + 1):
snake_case__ :Any = ", ".join([F'{arg}={value}' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F'Batch size was passed into `{function.__name__}` as the first argument when called.'
F'Remove this as the decorator already does so: `{function.__name__}({arg_str})`' )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__snake_case , *__snake_case , **__snake_case )
except Exception as e:
if should_reduce_batch_size(__snake_case ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
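# Hedged usage sketch (added): a self-contained miniature of the auto-batch-size
# pattern implemented above (mirroring `accelerate`'s find_executable_batch_size).
# Names below are illustrative; the real decorator also clears CUDA/XPU/NPU caches
# between attempts.
import functools as _functools

def _find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return _functools.partial(_find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except RuntimeError as e:
                if "CUDA out of memory." in str(e):
                    batch_size //= 2  # halve and retry
                else:
                    raise

    return decorator

@_find_executable_batch_size(starting_batch_size=128)
def _train_step_example(batch_size):
    if batch_size > 32:  # pretend anything above 32 does not fit in memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

# _train_step_example() retries with 128 -> 64 -> 32 and returns 32; note the
# caller does not pass the batch size, the decorator injects it.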
| 57
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class _snake_case ( _A ):
_A = 'vit_mae'
def __init__( self ,UpperCamelCase=768 ,UpperCamelCase=12 ,UpperCamelCase=12 ,UpperCamelCase=3_072 ,UpperCamelCase="gelu" ,UpperCamelCase=0.0 ,UpperCamelCase=0.0 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-12 ,UpperCamelCase=224 ,UpperCamelCase=16 ,UpperCamelCase=3 ,UpperCamelCase=True ,UpperCamelCase=16 ,UpperCamelCase=512 ,UpperCamelCase=8 ,UpperCamelCase=2_048 ,UpperCamelCase=0.75 ,UpperCamelCase=False ,**UpperCamelCase ,) -> int:
super().__init__(**UpperCamelCase )
snake_case__ :Dict = hidden_size
snake_case__ :List[Any] = num_hidden_layers
snake_case__ :int = num_attention_heads
snake_case__ :Any = intermediate_size
snake_case__ :Tuple = hidden_act
snake_case__ :List[Any] = hidden_dropout_prob
snake_case__ :Any = attention_probs_dropout_prob
snake_case__ :List[Any] = initializer_range
snake_case__ :Union[str, Any] = layer_norm_eps
snake_case__ :Optional[Any] = image_size
snake_case__ :Union[str, Any] = patch_size
snake_case__ :Dict = num_channels
snake_case__ :Optional[int] = qkv_bias
snake_case__ :Any = decoder_num_attention_heads
snake_case__ :List[str] = decoder_hidden_size
snake_case__ :Any = decoder_num_hidden_layers
snake_case__ :int = decoder_intermediate_size
snake_case__ :List[Any] = mask_ratio
snake_case__ :Optional[Any] = norm_pix_loss
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = b.T
snake_case__ :Optional[Any] = np.sum(np.square(__snake_case ) , axis=1 )
snake_case__ :Tuple = np.sum(np.square(__snake_case ) , axis=0 )
snake_case__ :Union[str, Any] = np.matmul(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : int ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = x.reshape(-1 , 3 )
snake_case__ :List[str] = squared_euclidean_distance(__snake_case , __snake_case )
return np.argmin(__snake_case , axis=1 )
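# Worked check (added): the distance helper above relies on the expansion
#     ||a - b||^2 = ||a||^2 - 2 * a.b + ||b||^2,
# computing all pairwise squared distances with one matmul instead of an explicit
# double loop. A minimal self-contained verification of that identity (names are
# illustrative):
def _pairwise_sq_dist_check():
    rng = np.random.default_rng(0)
    a = rng.normal(size=(4, 3))  # four "pixels" with RGB values
    b = rng.normal(size=(2, 3))  # two cluster centers
    bt = b.T
    fast = np.sum(a**2, axis=1)[:, None] - 2 * (a @ bt) + np.sum(bt**2, axis=0)[None, :]
    slow = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    assert np.allclose(fast, slow)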
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
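# Hedged usage sketch (added): the preprocessing above maps an RGB image to a flat
# sequence of palette indices ("input_ids"), the ImageGPT-style model input. The
# toy palette below is a random placeholder; real checkpoints ship a fixed set of
# color clusters.
def _color_quantize_pipeline_example():
    rng = np.random.default_rng(0)
    clusters = rng.uniform(-1, 1, size=(8, 3))  # toy palette of 8 colors
    image = rng.uniform(-1, 1, size=(4, 4, 3))  # already normalized to [-1, 1]
    flat = image.reshape(-1, 3)
    d = ((flat[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    input_ids = np.argmin(d, axis=1).reshape(1, -1)  # (batch_size, height * width)
    return input_ids  # each value indexes into `clusters`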
| 57
| 1
|
import numpy
# List of input, output pairs
__UpperCAmelCase : int = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
__UpperCAmelCase : int = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
__UpperCAmelCase : List[Any] = [2, 4, 1, 5]
__UpperCAmelCase : Optional[Any] = len(train_data)
__UpperCAmelCase : Tuple = 0.009
def lowercase_ ( __snake_case : int , __snake_case : int="train" ) -> Any:
'''simple docstring'''
return calculate_hypothesis_value(__snake_case , __snake_case ) - output(
__snake_case , __snake_case )
def lowercase_ ( __snake_case : int ) -> Tuple:
'''simple docstring'''
snake_case__ :Union[str, Any] = 0
for i in range(len(__snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowercase_ ( __snake_case : Any , __snake_case : Tuple ) -> str:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowercase_ ( __snake_case : str , __snake_case : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Union[str, Any]=m ) -> Tuple:
'''simple docstring'''
snake_case__ :Union[str, Any] = 0
for i in range(__snake_case ):
if index == -1:
summation_value += _error(__snake_case )
else:
summation_value += _error(__snake_case ) * train_data[i][0][index]
return summation_value
def lowercase_ ( __snake_case : List[str] ) -> Any:
'''simple docstring'''
snake_case__ :str = summation_of_cost_derivative(__snake_case , __snake_case ) / m
return cost_derivative_value
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
snake_case__ :Dict = 0.0_0_0_0_0_2
snake_case__ :List[str] = 0
snake_case__ :Tuple = 0
while True:
j += 1
snake_case__ :str = [0, 0, 0, 0]
for i in range(0 , len(__snake_case ) ):
snake_case__ :Any = get_cost_derivative(i - 1 )
snake_case__ :str = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__snake_case , __snake_case , atol=__snake_case , rtol=__snake_case , ):
break
snake_case__ :List[str] = temp_parameter_vector
print(("Number of iterations:", j) )
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
for i in range(len(__snake_case ) ):
print(("Actual output value:", output(__snake_case , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__snake_case , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 57
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict:
'''simple docstring'''
snake_case__ :Optional[Any] = dataset_loading_script_name
snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__snake_case )
snake_case__ :List[Any] = script_dir / F'{script_name}.py'
with open(__snake_case , "w" ) as f:
f.write(__snake_case )
return str(__snake_case )
| 57
| 1
|
# using dfs for finding eulerian path traversal
def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : Tuple=None ) -> Dict:
'''simple docstring'''
snake_case__ :int = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
snake_case__ , snake_case__ :Optional[int] = True, True
snake_case__ :List[Any] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
return path
def lowercase_ ( __snake_case : Dict , __snake_case : int ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :Any = 0
snake_case__ :Union[str, Any] = -1
for i in range(__snake_case ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
snake_case__ :Any = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Any:
'''simple docstring'''
snake_case__ :str = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
snake_case__ , snake_case__ :Tuple = check_circuit_or_path(__snake_case , __snake_case )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
snake_case__ :Tuple = 1
if check == 2:
snake_case__ :Union[str, Any] = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
snake_case__ :str = dfs(__snake_case , __snake_case , __snake_case )
print(__snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
snake_case__ :Tuple = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
snake_case__ :Tuple = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
snake_case__ :Optional[int] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
snake_case__ :Union[str, Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
snake_case__ :Tuple = {
1: [],
2: []
# all degree is zero
}
snake_case__ :List[Any] = 10
check_euler(__snake_case , __snake_case )
check_euler(__snake_case , __snake_case )
check_euler(__snake_case , __snake_case )
check_euler(__snake_case , __snake_case )
check_euler(__snake_case , __snake_case )
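# Worked note (added): `check_circuit_or_path` above encodes the classic degree
# rule -- a connected graph has an Euler circuit iff every vertex has even degree,
# and an Euler path iff exactly two vertices have odd degree. A small helper
# illustrating the parity count (names are illustrative):
def _odd_degree_vertices(graph):
    return [v for v, neighbors in graph.items() if len(neighbors) % 2 == 1]

# _odd_degree_vertices({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]})
# -> [1, 5]: exactly two odd vertices, so an Euler path exists but no circuit.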
if __name__ == "__main__":
main()
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : int = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = "▁"
__UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase : str = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
__UpperCAmelCase : Optional[Any] = {
"xlm-roberta-base": 5_1_2,
"xlm-roberta-large": 5_1_2,
"xlm-roberta-large-finetuned-conll02-dutch": 5_1_2,
"xlm-roberta-large-finetuned-conll02-spanish": 5_1_2,
"xlm-roberta-large-finetuned-conll03-english": 5_1_2,
"xlm-roberta-large-finetuned-conll03-german": 5_1_2,
}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self ,UpperCamelCase ,UpperCamelCase="<s>" ,UpperCamelCase="</s>" ,UpperCamelCase="</s>" ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase = None ,**UpperCamelCase ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ :Optional[int] = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else mask_token
snake_case__ :Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase ,eos_token=UpperCamelCase ,unk_token=UpperCamelCase ,sep_token=UpperCamelCase ,cls_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**UpperCamelCase ,)
snake_case__ :Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase ) )
snake_case__ :List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ :List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ :Tuple = 1
snake_case__ :str = len(self.sp_model ) + self.fairseq_offset
snake_case__ :Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
snake_case__ :Optional[int] = self.__dict__.copy()
snake_case__ :str = None
snake_case__ :List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :Tuple = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
snake_case__ :str = {}
snake_case__ :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ :Dict = [self.cls_token_id]
snake_case__ :Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :str = [self.sep_token_id]
snake_case__ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Dict = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]:
return self.sp_model.encode(UpperCamelCase ,out_type=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ :List[str] = self.sp_model.PieceToId(UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :Dict = "".join(UpperCamelCase ).replace(UpperCamelCase ," " ).strip()
return out_string
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case__ :List[Any] = os.path.join(
UpperCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase ,"wb" ) as fi:
snake_case__ :List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase )
return (out_vocab_file,)
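# Worked note (added): the fairseq/spm alignment documented in __init__ boils down
# to a constant offset of 1 -- spm id k maps to fairseq id k + 1, with the four
# specials pinned explicitly. A toy illustration (values are illustrative, not the
# real vocabulary):
def _fairseq_offset_example():
    fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    fairseq_offset = 1
    spm_id_of_comma = 3  # "," sits at position 3 in the spm vocab
    # "," therefore gets fairseq id 4, right after the pinned specials
    return fairseq_tokens_to_ids["<unk>"], spm_id_of_comma + fairseq_offset  # (3, 4)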
| 57
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
snake_case__ :Dict = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase )
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
# Create temp file
snake_case__ , snake_case__ :Optional[Any] = mkstemp()
snake_case__ :Optional[Any] = False
with fdopen(UpperCamelCase ,"w" ) as new_file:
with open(UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase )
if line_to_copy_below in line:
snake_case__ :Optional[Any] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase ,UpperCamelCase )
# Remove original file
remove(UpperCamelCase )
# Move new file
move(UpperCamelCase ,UpperCamelCase )
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase ):
with open(UpperCamelCase ) as datafile:
snake_case__ :int = []
snake_case__ :Optional[int] = False
snake_case__ :List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :Tuple = skip_units(UpperCamelCase )
elif "# Below: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :List[str] = skip_units(UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = []
elif "# Replace with" in line and "##" not in line:
snake_case__ :Optional[Any] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase )
remove(UpperCamelCase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _snake_case :
_A = 42
_A = None
_A = None
def lowercase_ ( ) -> Node | None:
'''simple docstring'''
snake_case__ :str = Node(1 )
snake_case__ :Any = Node(2 )
snake_case__ :Any = Node(3 )
snake_case__ :Optional[int] = Node(4 )
snake_case__ :Optional[Any] = Node(5 )
return tree
def lowercase_ ( __snake_case : Node | None ) -> list[int]:
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowercase_ ( __snake_case : Node | None ) -> list[int]:
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowercase_ ( __snake_case : Node | None ) -> list[int]:
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowercase_ ( __snake_case : Node | None ) -> int:
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def lowercase_ ( __snake_case : Node | None ) -> Sequence[Node | None]:
'''simple docstring'''
snake_case__ :list[Any] = []
if root is None:
return output
snake_case__ :Tuple = deque([root] )
while process_queue:
snake_case__ :int = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowercase_ ( __snake_case : Node | None , __snake_case : int ) -> Sequence[Node | None]:
'''simple docstring'''
snake_case__ :list[Any] = []
def populate_output(__snake_case : Node | None , __snake_case : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(__snake_case , __snake_case )
return output
def lowercase_ ( __snake_case : Node | None , __snake_case : int ) -> Sequence[Node | None]:
'''simple docstring'''
snake_case__ :list[Any] = []
def populate_output(__snake_case : Node | None , __snake_case : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(__snake_case , __snake_case )
return output
def lowercase_ ( __snake_case : Node | None ) -> Sequence[Node | None] | list[Any]:
'''simple docstring'''
if root is None:
return []
snake_case__ :list[Sequence[Node | None]] = []
snake_case__ :List[str] = 0
snake_case__ :List[Any] = height(__snake_case )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(__snake_case , __snake_case ) )
snake_case__ :str = 1
else:
output.append(get_nodes_from_right_to_left(__snake_case , __snake_case ) )
snake_case__ :Union[str, Any] = 0
return output
def lowercase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
snake_case__ :Optional[Any] = make_tree()
print(F'In-order Traversal: {inorder(__snake_case )}' )
print(F'Pre-order Traversal: {preorder(__snake_case )}' )
print(F'Post-order Traversal: {postorder(__snake_case )}' , "\n" )
print(F'Height of Tree: {height(__snake_case )}' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(__snake_case ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(__snake_case ) + 1 ):
print(F'Level {level}:' , get_nodes_from_left_to_right(__snake_case , level=__snake_case ) )
print("\nZigZag order Traversal: " )
print(zigzag(__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
| 57
| 1
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case__ :Tuple = json.loads(__snake_case )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case__ :Dict = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case__ :List[str] = json.loads(__snake_case )
if not mpi_options.get("sagemaker_mpi_enabled" , __snake_case ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _snake_case ( _A ):
_A = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def lowerCAmelCase_ ( self ) -> Any:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." ,UpperCamelCase ,)
@cached_property
def lowerCAmelCase_ ( self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
snake_case__ :int = torch.device("cpu" )
snake_case__ :Any = 0
elif is_sagemaker_model_parallel_available():
snake_case__ :str = smp.local_rank()
snake_case__ :Dict = torch.device("cuda" ,UpperCamelCase )
snake_case__ :str = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" ,timeout=self.ddp_timeout_delta )
snake_case__ :str = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
snake_case__ :Dict = torch.device("cuda" ,self.local_rank )
snake_case__ :Tuple = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case__ :Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case__ :Tuple = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" ,timeout=self.ddp_timeout_delta )
snake_case__ :Optional[int] = torch.device("cuda" ,self.local_rank )
snake_case__ :Optional[Any] = 1
if device.type == "cuda":
torch.cuda.set_device(UpperCamelCase )
return device
@property
def lowerCAmelCase_ ( self ) -> List[str]:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCAmelCase_ ( self ) -> Tuple:
return not is_sagemaker_model_parallel_available()
@property
def lowerCAmelCase_ ( self ) -> Optional[int]:
return False
| 57
|
def lowercase_ ( __snake_case : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case__ :List[str] = 4
snake_case__ :Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case__ :List[Any] = ((s * s) - 2) % m
return s == 0
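# Worked example (added): for p = 3, m = 2**3 - 1 = 7 and the single loop
# iteration gives s = (4 * 4 - 2) % 7 = 0, so M_3 = 7 is prime. For p = 11 the
# final residue is nonzero, matching 2**11 - 1 = 2047 = 23 * 89 (composite).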
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 57
| 1
|
from datetime import datetime
import requests
def lowercase_ ( __snake_case : str ) -> bytes:
'''simple docstring'''
snake_case__ :int = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
snake_case__ :List[Any] = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(__snake_case ).content
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = input("Enter Video/IGTV url: ").strip()
__UpperCAmelCase : Union[str, Any] = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 57
|
from typing import Any
def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list:
'''simple docstring'''
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
snake_case__ :dict = {}
snake_case__ :dict = {}
for state in states_space:
snake_case__ :List[Any] = observations_space[0]
snake_case__ :str = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case__ :str = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
snake_case__ :Any = observations_space[o]
snake_case__ :Tuple = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case__ :Tuple = ""
snake_case__ :Union[str, Any] = -1
for k_state in states_space:
snake_case__ :int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case__ :str = probability
snake_case__ :Tuple = k_state
# Update probabilities and pointers dicts
snake_case__ :List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case__ :List[str] = arg_max
# The final observation
snake_case__ :str = observations_space[len(__snake_case ) - 1]
# argmax for given final observation
snake_case__ :Optional[int] = ""
snake_case__ :List[str] = -1
for k_state in states_space:
snake_case__ :List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case__ :List[str] = probability
snake_case__ :int = k_state
snake_case__ :Any = arg_max
# Process pointers backwards
snake_case__ :int = last_state
snake_case__ :List[str] = []
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
snake_case__ :List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None:
'''simple docstring'''
_validate_list(__snake_case , "observations_space" )
_validate_list(__snake_case , "states_space" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_dict(__snake_case , "initial_probabilities" , __snake_case )
_validate_nested_dict(__snake_case , "transition_probabilities" )
_validate_nested_dict(__snake_case , "emission_probabilities" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57
| 1
|
from __future__ import annotations
def lowercase_ ( __snake_case : Dict , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ) -> int: # noqa: E741
'''simple docstring'''
while r - l > 1:
snake_case__ :Optional[Any] = (l + r) // 2
if v[m] >= key:
snake_case__ :Optional[Any] = m
else:
snake_case__ :str = m # noqa: E741
return r
def lowercase_ ( __snake_case : list[int] ) -> int:
'''simple docstring'''
if len(__snake_case ) == 0:
return 0
snake_case__ :List[str] = [0] * len(__snake_case )
snake_case__ :Optional[int] = 1
snake_case__ :Union[str, Any] = v[0]
for i in range(1 , len(__snake_case ) ):
if v[i] < tail[0]:
snake_case__ :Any = v[i]
elif v[i] > tail[length - 1]:
snake_case__ :Optional[Any] = v[i]
length += 1
else:
snake_case__ :Optional[int] = v[i]
return length
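# Illustrative, de-obfuscated sketch (assumption about the intent above): the
# longest strictly increasing subsequence in O(n log n) via bisect over a tails
# array. The name `_lis_length_sketch` is hypothetical.
from bisect import bisect_left
def _lis_length_sketch(v: list[int]) -> int:
    """
    >>> _lis_length_sketch([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    """
    tails: list[int] = []
    for x in v:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)  # x extends the best subsequence found so far
        else:
            tails[i] = x  # x is a smaller tail for subsequences of length i + 1
    return len(tails)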
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
def lowercase_ ( __snake_case : str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__snake_case ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowercase_ ( __snake_case : str ) -> List[Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = os.path.join(args.tf_model_dir , "parameters.json" )
snake_case__ :List[Any] = json.loads(open(__snake_case ).read() )
if not params:
raise ValueError(
F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' )
if not args.output.endswith(".pt" ):
snake_case__ :Any = args.output + ".pt"
snake_case__ :Optional[Any] = OrderedDict()
with tf.device("/CPU:0" ):
snake_case__ :Union[str, Any] = tf.train.load_checkpoint(args.tf_model_dir )
snake_case__ :List[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case__ :Tuple = reader.get_tensor(__snake_case ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
snake_case__ :List[str] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
snake_case__ :List[str] = 8
snake_case__ :Union[str, Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
                snake_case__ :Optional[int] = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :List[Any] = torch.tensor(__snake_case )
elif key_name.startswith("model/moe" ):
snake_case__ :Tuple = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
snake_case__ :List[str] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    snake_case__ :int = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :Any = torch.tensor(__snake_case )
elif key_name.endswith("/softmlp/kernel" ):
snake_case__ :Tuple = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    snake_case__ :List[Any] = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :List[str] = torch.tensor(__snake_case )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
snake_case__ :List[str] = key_name[-9:-7]
for i in range(16 ):
snake_case__ :List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
snake_case__ :Optional[int] = (
vnp[i].transpose([1, 0] ).copy()
                        )  # in Mesh-TensorFlow the experts are fused into one array, so it is split per expert here
snake_case__ :Optional[int] = torch.tensor(__snake_case )
elif key_name.startswith("model/mlp" ):
snake_case__ :Any = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
snake_case__ :Dict = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    snake_case__ :Optional[int] = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :Tuple = torch.tensor(__snake_case )
elif key_name.endswith("/p1/bias" ):
snake_case__ :Dict = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
snake_case__ :Any = vnp.copy() # same because it is one dimensional
snake_case__ :Union[str, Any] = torch.tensor(__snake_case )
elif key_name.endswith("/p2/kernel" ):
snake_case__ :Optional[Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    snake_case__ :Dict = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :str = torch.tensor(__snake_case )
elif key_name.endswith("/p2/bias" ):
snake_case__ :Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
snake_case__ :Optional[Any] = vnp.copy() # same because it is one dimensional
snake_case__ :List[str] = torch.tensor(__snake_case )
elif key_name.startswith("model/ln" ):
snake_case__ :Optional[Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case__ :Tuple = "model.blocks.%d.feed_forward.norm.bias" % player
snake_case__ :List[str] = vnp.copy() # same because it is one dimensional
snake_case__ :Dict = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
snake_case__ :Optional[int] = "model.blocks.%d.feed_forward.norm.weight" % player
snake_case__ :str = vnp.copy() # same because it is one dimensional
snake_case__ :Optional[int] = torch.tensor(__snake_case )
elif key_name.startswith("model/att" ):
snake_case__ :Tuple = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
                    snake_case__ :List[str] = vnp.copy()  # fused qkv kernel from Mesh-TensorFlow; split into q/k/v slices below
snake_case__ :str = state[:, 0, :, :]
snake_case__ :Optional[int] = state[:, 1, :, :]
snake_case__ :Any = state[:, 2, :, :]
snake_case__ :Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
                    )  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
                    )  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :List[str] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
                    )  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :int = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
snake_case__ :List[Any] = torch.tensor(__snake_case )
snake_case__ :Optional[int] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
snake_case__ :Union[str, Any] = torch.tensor(__snake_case )
snake_case__ :Any = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
snake_case__ :int = torch.tensor(__snake_case )
elif key_name.endswith("/o/kernel" ):
snake_case__ :Dict = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
snake_case__ :Union[str, Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
                    )  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/an" ):
snake_case__ :int = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
snake_case__ :str = "model.blocks.%d.self_attn.norm.bias" % player
snake_case__ :Tuple = vnp.copy() # same because it is one dimensional
snake_case__ :Tuple = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
snake_case__ :Any = "model.blocks.%d.self_attn.norm.weight" % player
snake_case__ :List[str] = vnp.copy() # same because it is one dimensional
snake_case__ :Optional[Any] = torch.tensor(__snake_case )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
snake_case__ :List[str] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
snake_case__ :Optional[Any] = "model.%s.weight" % nlayer
                snake_case__ :str = vnp.copy()  # embedding weights are copied unchanged
snake_case__ :str = torch.tensor(__snake_case )
if key_name.startswith("model/wte" ):
snake_case__ :str = "lm_head.weight"
                    snake_case__ :str = vnp.copy()  # embedding weights are copied unchanged
snake_case__ :Optional[int] = torch.tensor(__snake_case )
elif key_name.startswith("model/wob" ):
snake_case__ :List[str] = "final_logits_bias"
                snake_case__ :Any = vnp.copy()  # embedding weights are copied unchanged
snake_case__ :int = state.reshape((1, -1) )
snake_case__ :List[str] = torch.tensor(__snake_case )
elif key_name == "model/dense/kernel":
snake_case__ :Optional[int] = "model.last_project.weight"
                snake_case__ :Tuple = vnp.transpose([1, 0] ).copy()  # TF stores a dense kernel as (in, out); torch.nn.Linear expects (out, in)
snake_case__ :Tuple = torch.tensor(__snake_case )
elif key_name == "model/dense_1/bias":
snake_case__ :str = "model.last_project.bias"
snake_case__ :List[str] = vnp.copy() # same because it is one dimensional
snake_case__ :int = torch.tensor(__snake_case )
torch.save(__snake_case , args.output )
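# Illustrative helper (assumption, independent of the converter above): the
# recurring `transpose([1, 0]).copy()` exists because a TensorFlow dense kernel
# is stored as (in_features, out_features) while torch.nn.Linear stores its
# weight as (out_features, in_features). `_tf_kernel_to_linear_weight` is a
# hypothetical name.
def _tf_kernel_to_linear_weight(kernel: np.ndarray) -> torch.Tensor:
    assert kernel.ndim == 2, "expected a 2-D dense kernel"
    return torch.tensor(np.ascontiguousarray(kernel.transpose([1, 0])))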
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 57
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
'''simple docstring'''
snake_case__ :int = 3
snake_case__ :int = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # multiples of 15 already satisfy the `or` above, so they are counted exactly once
            result += a
a += 1
return result
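# Illustrative alternative (assumption about intent): the same sum computed in
# O(1) with inclusion-exclusion over arithmetic series; `_solution_closed_form`
# is a hypothetical name.
def _solution_closed_form(n: int = 1_000) -> int:
    """Sum of all multiples of 3 or 5 below n.
    >>> _solution_closed_form(10)
    23
    """
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2
    return series_sum(3) + series_sum(5) - series_sum(15)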
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCAmelCase : str = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowercase_ ( __snake_case : str ) -> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__snake_case )
def lowercase_ ( __snake_case : List[str] ) -> int:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
snake_case__ :Union[str, Any] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__snake_case , id=__snake_case )
| 57
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCamelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case__ :str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
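# Illustrative sketch (assumption; the real implementation lives in the repo's
# check_dummies utility): a minimal find_backend consistent with the assertions
# above, joining several is_*_available() checks with "_and_".
import re
_re_backend_sketch = re.compile(r"is_([a-z_]+)_available\(\)")
def _find_backend_sketch(line: str):
    backends = _re_backend_sketch.findall(line)
    return "_and_".join(backends) if backends else None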
| 57
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _snake_case :
def __init__( self ,UpperCamelCase = "cpu" ,UpperCamelCase = "openai/clip-vit-large-patch14" ) -> None:
snake_case__ :str = device
snake_case__ :Union[str, Any] = CLIPTokenizerFast.from_pretrained(UpperCamelCase )
snake_case__ :Any = [0.48145466, 0.4578275, 0.40821073]
snake_case__ :Any = [0.26862954, 0.26130258, 0.27577711]
snake_case__ :Optional[int] = torchvision.transforms.Normalize(self.image_mean ,self.image_std )
snake_case__ :str = torchvision.transforms.Resize(224 )
snake_case__ :Optional[int] = torchvision.transforms.CenterCrop(224 )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Any:
snake_case__ :str = self.resize(UpperCamelCase )
snake_case__ :List[str] = self.center_crop(UpperCamelCase )
snake_case__ :List[str] = self.normalize(UpperCamelCase )
return images
def __call__( self ,UpperCamelCase=None ,UpperCamelCase=None ,**UpperCamelCase ) -> Union[str, Any]:
snake_case__ :Optional[Any] = self.tokenizer(text=UpperCamelCase ,**UpperCamelCase )
snake_case__ :Tuple = self.preprocess_img(UpperCamelCase )
snake_case__ :Optional[Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _snake_case ( nn.Module ):
def __init__( self ,UpperCamelCase=10 ,UpperCamelCase=0.01 ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase="image" ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=False ,UpperCamelCase=False ,) -> None:
super().__init__()
snake_case__ :Optional[Any] = None
snake_case__ :Any = device if device else get_device()
if vqgan:
snake_case__ :Tuple = vqgan
else:
snake_case__ :Optional[Any] = load_vqgan(self.device ,conf_path=UpperCamelCase ,ckpt_path=UpperCamelCase )
self.vqgan.eval()
if clip:
snake_case__ :List[str] = clip
else:
snake_case__ :Optional[Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
snake_case__ :Optional[int] = ProcessorGradientFlow(device=self.device )
snake_case__ :Union[str, Any] = iterations
snake_case__ :str = lr
snake_case__ :List[Any] = log
snake_case__ :List[str] = make_grid
snake_case__ :List[Any] = return_val
snake_case__ :Union[str, Any] = quantize
snake_case__ :List[Any] = self.vqgan.decoder.z_shape
def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=5 ,UpperCamelCase=True ) -> Union[str, Any]:
snake_case__ :Union[str, Any] = []
if output_path is None:
snake_case__ :Dict = "./animation.gif"
if input_path is None:
snake_case__ :Optional[Any] = self.save_path
snake_case__ :Union[str, Any] = sorted(glob(input_path + "/*" ) )
if not len(UpperCamelCase ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(UpperCamelCase ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
snake_case__ :List[str] = total_duration / len(UpperCamelCase )
snake_case__ :str = [frame_duration] * len(UpperCamelCase )
if extend_frames:
snake_case__ :Dict = 1.5
snake_case__ :List[str] = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(UpperCamelCase ) )
imageio.mimsave(UpperCamelCase ,UpperCamelCase ,duration=UpperCamelCase )
print(f'gif saved to {output_path}' )
def lowerCAmelCase_ ( self ,UpperCamelCase=None ,UpperCamelCase=None ) -> str:
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
snake_case__ :Dict = preprocess(Image.open(UpperCamelCase ) ,target_image_size=256 ).to(self.device )
snake_case__ :Tuple = preprocess_vqgan(UpperCamelCase )
snake_case__ , *snake_case__ :int = self.vqgan.encode(UpperCamelCase )
return z
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
snake_case__ :Tuple = self.latent.detach().requires_grad_()
snake_case__ :Dict = base_latent + transform_vector
if self.quantize:
snake_case__ , *snake_case__ :str = self.vqgan.quantize(UpperCamelCase )
else:
snake_case__ :int = trans_latent
return self.vqgan.decode(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ) -> Union[str, Any]:
snake_case__ :Tuple = self.clip_preprocessor(text=UpperCamelCase ,images=UpperCamelCase ,return_tensors="pt" ,padding=UpperCamelCase )
snake_case__ :Tuple = self.clip(**UpperCamelCase )
snake_case__ :Optional[Any] = clip_outputs.logits_per_image
if weights is not None:
snake_case__ :str = similarity_logits * weights
return similarity_logits.sum()
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :List[str] = self._get_clip_similarity(pos_prompts["prompts"] ,UpperCamelCase ,weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
snake_case__ :Tuple = self._get_clip_similarity(neg_prompts["prompts"] ,UpperCamelCase ,weights=neg_prompts["weights"] )
else:
snake_case__ :str = torch.tensor([1] ,device=self.device )
snake_case__ :Optional[int] = -torch.log(UpperCamelCase ) + torch.log(UpperCamelCase )
return loss
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Any:
snake_case__ :int = torch.randn_like(self.latent ,requires_grad=UpperCamelCase ,device=self.device )
snake_case__ :Any = torch.optim.Adam([vector] ,lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case__ :Tuple = self._add_vector(UpperCamelCase )
snake_case__ :Union[str, Any] = loop_post_process(UpperCamelCase )
snake_case__ :Optional[Any] = self._get_CLIP_loss(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
print("CLIP loss" ,UpperCamelCase )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=UpperCamelCase )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
wandb.init(reinit=UpperCamelCase ,project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
snake_case__ :List[str] = Image.open(UpperCamelCase )
snake_case__ :int = image.resize((256, 256) )
wandb.log("Original Image" ,wandb.Image(UpperCamelCase ) )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
if not prompts:
return []
snake_case__ :int = []
snake_case__ :Any = []
if isinstance(UpperCamelCase ,UpperCamelCase ):
snake_case__ :int = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(UpperCamelCase ,(tuple, list) ):
snake_case__ :List[Any] = prompt[0]
snake_case__ :List[Any] = float(prompt[1] )
elif ":" in prompt:
snake_case__ , snake_case__ :str = prompt.split(":" )
snake_case__ :int = float(UpperCamelCase )
else:
snake_case__ :int = prompt
snake_case__ :Tuple = 1.0
processed_prompts.append(UpperCamelCase )
weights.append(UpperCamelCase )
return {
"prompts": processed_prompts,
"weights": torch.tensor(UpperCamelCase ,device=self.device ),
}
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=None ,) -> Union[str, Any]:
if image_path:
snake_case__ :Tuple = self._get_latent(UpperCamelCase )
else:
snake_case__ :str = torch.randn(self.latent_dim ,device=self.device )
if self.log:
self._init_logging(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case__ :Dict = self.process_prompts(UpperCamelCase )
snake_case__ :Optional[Any] = self.process_prompts(UpperCamelCase )
if save_final and save_path is None:
snake_case__ :Any = os.path.join("./outputs/" ,"_".join(pos_prompts["prompts"] ) )
if not os.path.exists(UpperCamelCase ):
os.makedirs(UpperCamelCase )
else:
snake_case__ :str = save_path + "_" + get_timestamp()
os.makedirs(UpperCamelCase )
snake_case__ :Any = save_path
snake_case__ :List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(UpperCamelCase ) )
snake_case__ :int = loop_post_process(UpperCamelCase )
for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) ):
if show_intermediate:
show_pil(UpperCamelCase )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path ,f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"Image": wandb.Image(UpperCamelCase )} )
if show_final:
show_pil(UpperCamelCase )
if save_final:
transformed_img.save(os.path.join(self.save_path ,f'iter_{iter:03d}_final.png' ) )
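# Illustrative standalone sketch (assumption) of the prompt convention parsed by
# process_prompts above: "a | b:0.5" becomes (prompt, weight) pairs with a
# default weight of 1.0. `_parse_prompts_sketch` is a hypothetical name.
def _parse_prompts_sketch(prompts: str):
    """
    >>> _parse_prompts_sketch("a smiling face | an old photo:0.5")
    [('a smiling face', 1.0), ('an old photo', 0.5)]
    """
    parsed = []
    for raw in (p.strip() for p in prompts.split("|")):
        if ":" in raw:
            text, weight = raw.rsplit(":", 1)
            parsed.append((text.strip(), float(weight)))
        else:
            parsed.append((raw, 1.0))
    return parsed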
| 57
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
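# Illustrative sketch (assumption, PEP 562 style) of what the lazy-module
# indirection achieves: attribute access triggers the import, so importing the
# package stays cheap when sentencepiece is not installed.
def __getattr__(name):
    if name == "BartphoTokenizer":
        from .tokenization_bartpho import BartphoTokenizer
        return BartphoTokenizer
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")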
| 57
| 1
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( _A , unittest.TestCase ):
_A = ConsistencyModelPipeline
_A = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_A = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
_A = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Tuple = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" ,subfolder="test_unet" ,)
return unet
@property
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Tuple = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" ,subfolder="test_unet_class_cond" ,)
return unet
def lowerCAmelCase_ ( self ,UpperCamelCase=False ) -> Tuple:
if class_cond:
snake_case__ :Tuple = self.dummy_cond_unet
else:
snake_case__ :Optional[int] = self.dummy_uncond_unet
# Default to CM multistep sampler
snake_case__ :List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
snake_case__ :Tuple = {
"unet": unet,
"scheduler": scheduler,
}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> str:
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :List[str] = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :Any = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :Any = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Optional[Any] = self.get_dummy_components()
snake_case__ :List[Any] = ConsistencyModelPipeline(**UpperCamelCase )
snake_case__ :Tuple = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :List[Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case__ :Optional[int] = image[0, -3:, -3:, -1]
snake_case__ :List[str] = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Tuple = self.get_dummy_components(class_cond=UpperCamelCase )
snake_case__ :Tuple = ConsistencyModelPipeline(**UpperCamelCase )
snake_case__ :Dict = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :List[Any] = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Optional[Any] = 0
snake_case__ :int = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case__ :int = image[0, -3:, -3:, -1]
snake_case__ :str = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :List[str] = self.get_dummy_components()
snake_case__ :Any = ConsistencyModelPipeline(**UpperCamelCase )
snake_case__ :Tuple = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :int = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Optional[Any] = 1
snake_case__ :str = None
snake_case__ :Dict = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case__ :Dict = image[0, -3:, -3:, -1]
snake_case__ :List[Any] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ :Union[str, Any] = self.get_dummy_components(class_cond=UpperCamelCase )
snake_case__ :Dict = ConsistencyModelPipeline(**UpperCamelCase )
snake_case__ :Any = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Tuple = self.get_dummy_inputs(UpperCamelCase )
snake_case__ :Optional[Any] = 1
snake_case__ :List[Any] = None
snake_case__ :Dict = 0
snake_case__ :Dict = pipe(**UpperCamelCase ).images
assert image.shape == (1, 32, 32, 3)
snake_case__ :Any = image[0, -3:, -3:, -1]
snake_case__ :Optional[int] = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ,UpperCamelCase=0 ,UpperCamelCase=False ,UpperCamelCase="cpu" ,UpperCamelCase=torch.floataa ,UpperCamelCase=(1, 3, 64, 64) ) -> List[Any]:
snake_case__ :str = torch.manual_seed(UpperCamelCase )
snake_case__ :str = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
snake_case__ :Any = self.get_fixed_latents(seed=UpperCamelCase ,device=UpperCamelCase ,dtype=UpperCamelCase ,shape=UpperCamelCase )
snake_case__ :Union[str, Any] = latents
return inputs
def lowerCAmelCase_ ( self ,UpperCamelCase=0 ,UpperCamelCase="cpu" ,UpperCamelCase=torch.floataa ,UpperCamelCase=(1, 3, 64, 64) ) -> Any:
if type(UpperCamelCase ) == str:
snake_case__ :int = torch.device(UpperCamelCase )
snake_case__ :List[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :Tuple = randn_tensor(UpperCamelCase ,generator=UpperCamelCase ,device=UpperCamelCase ,dtype=UpperCamelCase )
return latents
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" ,subfolder="diffusers_cd_imagenet64_l2" )
snake_case__ :Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
snake_case__ :int = ConsistencyModelPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :List[str] = self.get_inputs()
snake_case__ :List[Any] = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case__ :Tuple = image[0, -3:, -3:, -1]
snake_case__ :List[str] = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" ,subfolder="diffusers_cd_imagenet64_l2" )
snake_case__ :Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
snake_case__ :Union[str, Any] = ConsistencyModelPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Union[str, Any] = self.get_inputs()
snake_case__ :str = 1
snake_case__ :Tuple = None
snake_case__ :str = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case__ :List[str] = image[0, -3:, -3:, -1]
snake_case__ :Union[str, Any] = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" ,subfolder="diffusers_cd_imagenet64_l2" )
snake_case__ :Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
snake_case__ :Union[str, Any] = ConsistencyModelPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Any = self.get_inputs(get_fixed_latents=UpperCamelCase ,device=UpperCamelCase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase ,enable_math=UpperCamelCase ,enable_mem_efficient=UpperCamelCase ):
snake_case__ :Any = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case__ :Tuple = image[0, -3:, -3:, -1]
snake_case__ :List[str] = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" ,subfolder="diffusers_cd_imagenet64_l2" )
snake_case__ :int = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.002 ,sigma_max=80.0 ,)
snake_case__ :Dict = ConsistencyModelPipeline(unet=UpperCamelCase ,scheduler=UpperCamelCase )
pipe.to(torch_device=UpperCamelCase ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :List[Any] = self.get_inputs(get_fixed_latents=UpperCamelCase ,device=UpperCamelCase )
snake_case__ :Tuple = 1
snake_case__ :int = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=UpperCamelCase ,enable_math=UpperCamelCase ,enable_mem_efficient=UpperCamelCase ):
snake_case__ :Any = pipe(**UpperCamelCase ).images
assert image.shape == (1, 64, 64, 3)
snake_case__ :Dict = image[0, -3:, -3:, -1]
snake_case__ :Tuple = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
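# Illustrative helper (assumption): every test above compares the same 3x3
# corner slice of the first generated image against a hard-coded reference, so
# the shared pattern could be factored like this.
def _assert_image_slice_close(image, expected_slice, atol=1E-3):
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - np.asarray(expected_slice)).max() < atol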
| 57
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024; tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Any = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't do an isinstance check because the tokenizer class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't do an isinstance check because the tokenizer class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
snake_case__ :List[str] = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
        # Can't do an isinstance check because the tokenizer class is loaded from a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Dict = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ :Optional[int] = Trie()
snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase ,["AB", "C"] )
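# Illustrative, simplified sketch (assumption) of the Trie behaviour exercised
# above: greedy longest-match splitting. The real transformers Trie uses a more
# elaborate multi-cursor scan, but it agrees with this sketch on the cases tested.
class _TrieSketch:
    def __init__(self):
        self.data = {}
    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[""] = 1  # terminal marker, same convention as the assertions above
    def split(self, text):
        """
        >>> t = _TrieSketch(); t.add("AB"); t.add("B"); t.add("C")
        >>> t.split("ABC")
        ['AB', 'C']
        """
        parts, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if "" in node:
                    end = j  # remember the longest match seen so far
            if end is None:
                i += 1
            else:
                if start < i:
                    parts.append(text[start:i])
                parts.append(text[i:end])
                start = i = end
        if start < len(text):
            parts.append(text[start:])
        return parts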
| 57
| 1
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__UpperCAmelCase : List[Any] = datasets.utils.logging.get_logger(__name__)
class _snake_case ( folder_based_builder.FolderBasedBuilderConfig ):
_A = None
_A = None
class _snake_case ( folder_based_builder.FolderBasedBuilder ):
_A = datasets.Audio()
_A = 'audio'
_A = AudioFolderConfig
_A = 42 # definition at the bottom of the script
_A = AudioClassification(audio_column='audio' , label_column='label' )
__UpperCAmelCase : Optional[int] = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
__UpperCAmelCase : List[str] = AUDIO_EXTENSIONS
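# Illustrative helper (assumption, not part of the builder): how an extension
# whitelist like the one above is typically used to discover audio files.
from pathlib import Path
def _iter_audio_files(root, extensions):
    allowed = {ext.lower() for ext in extensions}
    for path in sorted(Path(root).rglob("*")):
        if path.suffix.lower() in allowed:
            yield path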
| 57
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
    # We also need to keep track of the starting epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
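# Illustrative helper (assumption): the character-by-character epoch parsing in
# training_function above can be expressed with one regular expression.
import re
def _starting_epoch_from_checkpoint(checkpoint_path: str) -> int:
    """
    >>> _starting_epoch_from_checkpoint("outputs/epoch_3")
    4
    """
    match = re.search(r"epoch_(\d+)", checkpoint_path)
    return int(match.group(1)) + 1 if match else 0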
def lowercase_ ( ) -> Any:
'''simple docstring'''
    snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of a training script with checkpointing and resume support." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 57
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase : Dict = random.Random()
if is_torch_available():
import torch
def lowercase_ ( __snake_case : int , __snake_case : Optional[Any]=1.0 , __snake_case : Optional[int]=None , __snake_case : int=None ) -> Optional[Any]:
'''simple docstring'''
if rng is None:
snake_case__ :List[str] = global_rng
snake_case__ :str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
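

# The sample tree built in main() below (comment added for reference):
#   1 -> left 2, right 3
#   2 -> left 4, right 5
#   5 -> left 6, right 9
#   3 -> left 7 (no right child)
#   7 -> left 8 (no right child)
# It is not a full binary tree (nodes 3 and 7 each have exactly one child),
# and its depth is 4.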
def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node.
    A treap is a binary tree ordered by value and a heap ordered by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into two: values <= value go left, the rest go right."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must be <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert a value: split around it, then merge left + new node + right."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value by splitting them out, then merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Just a recursive print of the tree."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """
    Commands:
    + value  -- add value to the treap
    - value  -- erase all nodes with that value
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
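

def demo() -> None:
    """Non-interactive sketch (added for illustration; not in the original file)."""
    root = None
    for value in [5, 3, 9, 1]:
        root = insert(root, value)
    inorder(root)  # prints: 1,3,5,9,
    print()
    root = erase(root, 3)
    inorder(root)  # prints: 1,5,9,
    print()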
def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
def apply_table(inp, table):
    """Apply a (1-indexed) permutation/selection table to a bit string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
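

# Quick self-checks of the primitives (added for illustration; these run on
# import, before the interactive block below):
assert apply_table("1010", [2, 4, 3, 1]) == "0011"
assert left_shift("10100") == "01001"
assert xor("1010", "0110") == "1100"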
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
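

# Minimal usage sketch (comment added for illustration; hyperparameters are arbitrary):
#   config = UMT5Config(vocab_size=256, d_model=64, d_kv=16, d_ff=128, num_layers=2, num_heads=4)
#   config.hidden_size  # -> 64, resolved through the property alias above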
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
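

# Shape sketch (comment added for illustration; hyperparameters are arbitrary):
#   enc = SpectrogramNotesEncoder(max_length=64, vocab_size=128, d_model=32, dropout_rate=0.1,
#                                 num_layers=2, num_heads=4, d_kv=8, d_ff=64,
#                                 feed_forward_proj="gated-gelu")
#   tokens = torch.zeros(1, 16, dtype=torch.long); mask = torch.ones(1, 16, dtype=torch.long)
#   x, out_mask = enc(tokens, mask)  # x has shape (1, 16, 32)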
def bead_sort(sequence: list) -> list:
    """
    Bead sort (gravity sort) for a list of non-negative integers.

    >>> bead_sort([6, 11, 12, 4, 1, 5])
    [1, 4, 5, 6, 11, 12]
    >>> bead_sort([5, 0, 4, 3])
    [0, 3, 4, 5]
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
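    # Illustrative extra check (added): one "gravity" pass suffices here;
    # [4, 1, 3] -> [1, 4, 3] -> [1, 3, 4] within the first outer iteration.
    assert bead_sort([4, 1, 3]) == [1, 3, 4]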
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on the graph defined above."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
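    # Sanity sketch (added for illustration): with the edges above, the DFS
    # post-order produced is ["c", "d", "e", "b", "a"] -- each vertex appears
    # only after all of its outgoing neighbours.
    assert sort == ["c", "d", "e", "b", "a"]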
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
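

# Usage sketch (comment added for illustration): once registered via
# register_subcommand, this class backs invocations such as:
#   transformers-cli download bert-base-uncased --cache-dir ./models --force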
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        BridgeTowerImageProcessor, assuming do_resize is set to True with a
        scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)

        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
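
# squared_euclidean_distance() evaluates ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i.b_j + ||b_j||^2
# for all pairs at once. Tiny worked example (added for illustration):
#   a = np.array([[0.0, 0.0, 0.0]]); b = np.array([[1.0, 2.0, 2.0]])
#   squared_euclidean_distance(a, b)  # -> [[9.]]
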
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
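# Usage sketch (hypothetical palette and class name; real ImageGPT checkpoints
# ship their own clusters): resize -> scale to [-1, 1] -> nearest-cluster ids.
#     processor = ImageGPTImageProcessor(clusters=np.random.rand(16, 3) * 2 - 1)
#     batch = processor(images=pil_image, return_tensors="np")
#     batch["input_ids"].shape  # (1, 256 * 256) with the default 256x256 size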
| 57
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase : int = {
"google/realm-cc-news-pretrained-embedder": 5_1_2,
"google/realm-cc-news-pretrained-encoder": 5_1_2,
"google/realm-cc-news-pretrained-scorer": 5_1_2,
"google/realm-cc-news-pretrained-openqa": 5_1_2,
"google/realm-orqa-nq-openqa": 5_1_2,
"google/realm-orqa-nq-reader": 5_1_2,
"google/realm-orqa-wq-openqa": 5_1_2,
"google/realm-orqa-wq-reader": 5_1_2,
}
__UpperCAmelCase : List[Any] = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = RealmTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=True ,UpperCamelCase="[UNK]" ,UpperCamelCase="[SEP]" ,UpperCamelCase="[PAD]" ,UpperCamelCase="[CLS]" ,UpperCamelCase="[MASK]" ,UpperCamelCase=True ,UpperCamelCase=None ,**UpperCamelCase ,) -> Optional[Any]:
super().__init__(
UpperCamelCase ,tokenizer_file=UpperCamelCase ,do_lower_case=UpperCamelCase ,unk_token=UpperCamelCase ,sep_token=UpperCamelCase ,pad_token=UpperCamelCase ,cls_token=UpperCamelCase ,mask_token=UpperCamelCase ,tokenize_chinese_chars=UpperCamelCase ,strip_accents=UpperCamelCase ,**UpperCamelCase ,)
snake_case__ :List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents" ,UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,UpperCamelCase ) != tokenize_chinese_chars
):
snake_case__ :str = getattr(UpperCamelCase ,normalizer_state.pop("type" ) )
snake_case__ :Tuple = do_lower_case
snake_case__ :Union[str, Any] = strip_accents
snake_case__ :List[Any] = tokenize_chinese_chars
snake_case__ :Any = normalizer_class(**UpperCamelCase )
snake_case__ :Any = do_lower_case
def lowerCAmelCase_ ( self ,UpperCamelCase ,**UpperCamelCase ) -> List[Any]:
snake_case__ :Optional[int] = PaddingStrategy.MAX_LENGTH
snake_case__ :Any = text
snake_case__ :List[Any] = kwargs.pop("text_pair" ,UpperCamelCase )
snake_case__ :Optional[int] = kwargs.pop("return_tensors" ,UpperCamelCase )
snake_case__ :List[Any] = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(UpperCamelCase ):
if batch_text_pair is not None:
snake_case__ :Tuple = batch_text_pair[idx]
else:
snake_case__ :str = None
snake_case__ :Optional[Any] = super().__call__(UpperCamelCase ,UpperCamelCase ,return_tensors=UpperCamelCase ,**UpperCamelCase )
snake_case__ :Union[str, Any] = encoded_candidates.get("input_ids" )
snake_case__ :Optional[Any] = encoded_candidates.get("attention_mask" )
snake_case__ :int = encoded_candidates.get("token_type_ids" )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase )
        snake_case__ :str = {key: item for key, item in output_data.items() if len(item ) != 0}
return BatchEncoding(UpperCamelCase ,tensor_type=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=None ) -> str:
snake_case__ :List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :List[str] = [self.sep_token_id]
snake_case__ :str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :Tuple = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
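# Usage sketch for the batched candidate encoding above (class name assumed to
# be the fast Realm tokenizer; requires the checkpoint to be cached locally):
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     out = tokenizer([["candidate 1", "candidate 2"]], max_length=10, return_tensors="np")
#     out["input_ids"].shape  # (1, 2, 10) -- every candidate padded to max_length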
| 57
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict:
'''simple docstring'''
snake_case__ :Optional[Any] = dataset_loading_script_name
snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__snake_case )
snake_case__ :List[Any] = script_dir / F'{script_name}.py'
with open(__snake_case , "w" ) as f:
f.write(__snake_case )
return str(__snake_case )
| 57
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__UpperCAmelCase : Optional[int] = False
try:
__UpperCAmelCase : str = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class _snake_case :
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = [] ) -> Tuple:
snake_case__ :Any = 0
snake_case__ :Optional[Any] = choices
snake_case__ :Union[str, Any] = prompt
if sys.platform == "win32":
snake_case__ :Optional[int] = "*"
else:
snake_case__ :Optional[int] = "➔ "
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = "" ) -> Optional[Any]:
if sys.platform != "win32":
writeColor(self.choices[index] ,32 ,UpperCamelCase )
else:
forceWrite(self.choices[index] ,UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
if index == self.position:
forceWrite(f' {self.arrow_char} ' )
self.write_choice(UpperCamelCase )
else:
forceWrite(f' {self.choices[index]}' )
reset_cursor()
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = 1 ) -> Tuple:
snake_case__ :Tuple = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCamelCase )
move_cursor(UpperCamelCase ,direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def lowerCAmelCase_ ( self ) -> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def lowerCAmelCase_ ( self ) -> int:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def lowerCAmelCase_ ( self ) -> Any:
move_cursor(len(self.choices ) - self.position ,"DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def lowerCAmelCase_ ( self ) -> Any:
move_cursor(len(self.choices ) - self.position ,"DOWN" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :List[Any] = int(chr(self.current_selection ) )
snake_case__ :str = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP ,-movement )
elif self.position < index:
self.move_direction(Direction.DOWN ,UpperCamelCase )
else:
return
else:
return
def lowerCAmelCase_ ( self ,UpperCamelCase = 0 ) -> int:
if self.prompt:
linebreak()
forceWrite(self.prompt ,"\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" ,"\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" ,"\n" )
snake_case__ :str = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCamelCase )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position ,"UP" )
with cursor.hide():
while True:
if in_colab:
try:
snake_case__ :Dict = int(builtins.input() )
except ValueError:
snake_case__ :Dict = default_choice
else:
snake_case__ :List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 ,"UP" )
clear_line()
self.write_choice(UpperCamelCase ,"\n" )
return choice
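# Usage sketch (interactive; the constructor takes (prompt, choices) as above):
#     selected = BulletMenu("Pick a device", ["cpu", "cuda"]).run(default_choice=0)
# Arrow or number keys move the highlight; enter returns the chosen index.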
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="resnet50" ,UpperCamelCase=3 ,UpperCamelCase=32 ,UpperCamelCase=3 ,UpperCamelCase=True ,UpperCamelCase=True ,) -> Optional[Any]:
snake_case__ :List[str] = parent
snake_case__ :Dict = out_indices if out_indices is not None else [4]
snake_case__ :List[str] = stage_names
snake_case__ :str = out_features
snake_case__ :Union[str, Any] = backbone
snake_case__ :int = batch_size
snake_case__ :List[str] = image_size
snake_case__ :Optional[Any] = num_channels
snake_case__ :Any = use_pretrained_backbone
snake_case__ :Tuple = is_training
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ :List[str] = self.get_config()
return config, pixel_values
def lowerCAmelCase_ ( self ) -> str:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :Any = TimmBackbone(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ :List[str] = model(UpperCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :List[Any] = self.prepare_config_and_inputs()
snake_case__ , snake_case__ :List[Any] = config_and_inputs
snake_case__ :str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( _A , _A , _A , unittest.TestCase ):
_A = (TimmBackbone,) if is_torch_available() else ()
_A = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[Any] = TimmBackboneModelTester(self )
snake_case__ :Optional[Any] = ConfigTester(self ,config_class=UpperCamelCase ,has_text_modality=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[Any] = "resnet18"
snake_case__ :int = "microsoft/resnet-18"
snake_case__ :List[Any] = AutoBackbone.from_pretrained(UpperCamelCase ,use_timm_backbone=UpperCamelCase )
snake_case__ :List[Any] = AutoBackbone.from_pretrained(UpperCamelCase )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
snake_case__ :str = AutoBackbone.from_pretrained(UpperCamelCase ,use_timm_backbone=UpperCamelCase ,out_indices=[1, 2, 3] )
snake_case__ :str = AutoBackbone.from_pretrained(UpperCamelCase ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowerCAmelCase_ ( self ) -> Optional[int]:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCAmelCase_ ( self ) -> str:
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowerCAmelCase_ ( self ) -> Dict:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCAmelCase_ ( self ) -> int:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCAmelCase_ ( self ) -> Dict:
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowerCAmelCase_ ( self ) -> str:
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
pass
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ , snake_case__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ :Tuple = model_class(UpperCamelCase )
snake_case__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ :List[str] = [*signature.parameters.keys()]
snake_case__ :Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ , snake_case__ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ :Any = True
snake_case__ :str = self.has_attentions
# no need to test all models as different heads yield the same functionality
snake_case__ :Tuple = self.all_model_classes[0]
snake_case__ :str = model_class(UpperCamelCase )
model.to(UpperCamelCase )
snake_case__ :Optional[int] = self._prepare_for_class(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = model(**UpperCamelCase )
snake_case__ :Union[str, Any] = outputs[0][-1]
# Encoder-/Decoder-only models
snake_case__ :Dict = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
snake_case__ :Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ , snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ :List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Tuple = model(**UpperCamelCase )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
snake_case__ :Optional[Any] = copy.deepcopy(UpperCamelCase )
snake_case__ :List[str] = None
snake_case__ :List[Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :List[Any] = model(**UpperCamelCase )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
snake_case__ :Tuple = copy.deepcopy(UpperCamelCase )
snake_case__ :Tuple = False
snake_case__ :Tuple = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Optional[int] = model(**UpperCamelCase )
| 57
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
snake_case__ :Dict = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase )
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
# Create temp file
snake_case__ , snake_case__ :Optional[Any] = mkstemp()
snake_case__ :Optional[Any] = False
with fdopen(UpperCamelCase ,"w" ) as new_file:
with open(UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase )
if line_to_copy_below in line:
snake_case__ :Optional[Any] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase ,UpperCamelCase )
# Remove original file
remove(UpperCamelCase )
# Move new file
move(UpperCamelCase ,UpperCamelCase )
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase ):
with open(UpperCamelCase ) as datafile:
snake_case__ :int = []
snake_case__ :Optional[int] = False
snake_case__ :List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :Tuple = skip_units(UpperCamelCase )
elif "# Below: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :List[str] = skip_units(UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = []
elif "# Replace with" in line and "##" not in line:
snake_case__ :Optional[Any] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase )
remove(UpperCamelCase )
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
| 1
|
from __future__ import annotations
def lowercase_ ( __snake_case : int = 4 ) -> list[list[int]]:
'''simple docstring'''
snake_case__ :Union[str, Any] = abs(__snake_case ) or 4
return [[1 + x + y * row_size for x in range(__snake_case )] for y in range(__snake_case )]
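# Example: make_matrix(2) -> [[1, 2], [3, 4]]; the default builds the 4x4 grid 1..16.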
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(__snake_case ) )
# OR.. transpose(reverse_column(matrix))
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(__snake_case ) )
# OR.. reverse_column(reverse_row(matrix))
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(__snake_case ) )
# OR.. transpose(reverse_row(matrix))
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
snake_case__ :int = [list(__snake_case ) for x in zip(*__snake_case )]
return matrix
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
snake_case__ :Optional[int] = matrix[::-1]
return matrix
def lowercase_ ( __snake_case : list[list[int]] ) -> list[list[int]]:
'''simple docstring'''
snake_case__ :Dict = [x[::-1] for x in matrix]
return matrix
def lowercase_ ( __snake_case : list[list[int]] ) -> None:
'''simple docstring'''
for i in matrix:
print(*__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : int = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
__UpperCAmelCase : Optional[int] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
__UpperCAmelCase : List[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 57
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
| 57
| 1
|
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__UpperCAmelCase : int = {
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
__UpperCAmelCase : str = "hopper-medium-v2"
__UpperCAmelCase : Optional[int] = gym.make(env_name)
__UpperCAmelCase : List[Any] = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
__UpperCAmelCase : List[str] = env.reset()
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : int = 0
__UpperCAmelCase : List[str] = 1_0_0_0
__UpperCAmelCase : str = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCAmelCase : List[str] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = env.step(denorm_actions)
__UpperCAmelCase : List[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCAmelCase : Union[str, Any] = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 57
|
def lowercase_ ( __snake_case : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case__ :List[str] = 4
snake_case__ :Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case__ :List[Any] = ((s * s) - 2) % m
return s == 0
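# Worked trace for p = 3: m = 2**3 - 1 = 7 and s starts at 4; the single
# iteration gives (4 * 4 - 2) % 7 = 0, so 7 is correctly reported prime.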
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 57
| 1
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
__UpperCAmelCase : List[str] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def lowercase_ ( __snake_case : int , __snake_case : Any , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split("." ):
snake_case__ :Any = getattr(__snake_case , __snake_case )
if weight_type is not None:
snake_case__ :List[Any] = getattr(__snake_case , __snake_case ).shape
else:
snake_case__ :str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case__ :Tuple = value
elif weight_type == "weight_g":
snake_case__ :List[str] = value
elif weight_type == "weight_v":
snake_case__ :Tuple = value
elif weight_type == "bias":
snake_case__ :Optional[int] = value
else:
snake_case__ :Optional[Any] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowercase_ ( __snake_case : str , __snake_case : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :Dict = []
snake_case__ :List[str] = fairseq_model.state_dict()
snake_case__ :List[str] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ :str = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , )
snake_case__ :List[Any] = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ :Optional[Any] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ :Dict = True
if "*" in mapped_key:
snake_case__ :int = name.split(__snake_case )[0].split("." )[-2]
snake_case__ :Dict = mapped_key.replace("*" , __snake_case )
if "weight_g" in name:
snake_case__ :Optional[int] = "weight_g"
elif "weight_v" in name:
snake_case__ :int = "weight_v"
elif "bias" in name:
snake_case__ :int = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ :Optional[int] = "weight"
else:
snake_case__ :List[Any] = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def lowercase_ ( __snake_case : Dict , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Dict ) -> List[str]:
'''simple docstring'''
snake_case__ :Dict = full_name.split("conv_layers." )[-1]
snake_case__ :Union[str, Any] = name.split("." )
snake_case__ :Dict = int(items[0] )
snake_case__ :Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case__ :Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case__ :Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case__ :Union[str, Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case__ :Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]=None , __snake_case : List[str]=None , __snake_case : Optional[int]=True ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
snake_case__ :int = UniSpeechSatConfig.from_pretrained(__snake_case )
else:
snake_case__ :Tuple = UniSpeechSatConfig()
snake_case__ :Optional[Any] = ""
if is_finetuned:
snake_case__ :Optional[Any] = UniSpeechSatForCTC(__snake_case )
else:
snake_case__ :Optional[Any] = UniSpeechSatForPreTraining(__snake_case )
snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ :Tuple = model[0].eval()
recursively_load_weights(__snake_case , __snake_case )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
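# Example invocation (all paths below are placeholders, not real files):
#     python convert_unispeech_sat_checkpoint.py --checkpoint_path ./checkpoint.pt \
#         --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./unispeech-sat-hf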
| 57
|
from typing import Any
def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list:
'''simple docstring'''
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
snake_case__ :dict = {}
snake_case__ :dict = {}
for state in states_space:
snake_case__ :List[Any] = observations_space[0]
snake_case__ :str = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case__ :str = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
snake_case__ :Any = observations_space[o]
snake_case__ :Tuple = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case__ :Tuple = ""
snake_case__ :Union[str, Any] = -1
for k_state in states_space:
snake_case__ :int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case__ :str = probability
snake_case__ :Tuple = k_state
# Update probabilities and pointers dicts
snake_case__ :List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case__ :List[str] = arg_max
# The final observation
snake_case__ :str = observations_space[len(__snake_case ) - 1]
# argmax for given final observation
snake_case__ :Optional[int] = ""
snake_case__ :List[str] = -1
for k_state in states_space:
snake_case__ :List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case__ :List[str] = probability
snake_case__ :int = k_state
snake_case__ :Any = arg_max
# Process pointers backwards
snake_case__ :int = last_state
snake_case__ :List[str] = []
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
snake_case__ :List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
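# Minimal sketch (hypothetical two-state HMM): with states ["rain", "sun"],
# observations ["walk", "shop"], and emission probabilities that favour "sun"
# for "walk" and "rain" for "shop", the decoder would return a path such as
# ["sun", "rain"] -- the argmax state sequence traced back through `pointers`.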
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None:
'''simple docstring'''
_validate_list(__snake_case , "observations_space" )
_validate_list(__snake_case , "states_space" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
    if not isinstance(_object , list ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
            if not isinstance(x , str ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_dict(__snake_case , "initial_probabilities" , __snake_case )
_validate_nested_dict(__snake_case , "transition_probabilities" )
_validate_nested_dict(__snake_case , "emission_probabilities" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
    if not isinstance(_object , dict ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
    if not all(isinstance(x , str ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 57
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _snake_case ( _A ):
_A = 'time_series_transformer'
_A = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = "student_t" ,UpperCamelCase = "nll" ,UpperCamelCase = 1 ,UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] ,UpperCamelCase = "mean" ,UpperCamelCase = 0 ,UpperCamelCase = 0 ,UpperCamelCase = 0 ,UpperCamelCase = 0 ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = 32 ,UpperCamelCase = 32 ,UpperCamelCase = 2 ,UpperCamelCase = 2 ,UpperCamelCase = 2 ,UpperCamelCase = 2 ,UpperCamelCase = True ,UpperCamelCase = "gelu" ,UpperCamelCase = 64 ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 0.1 ,UpperCamelCase = 100 ,UpperCamelCase = 0.02 ,UpperCamelCase=True ,**UpperCamelCase ,) -> List[Any]:
# time series specific configuration
snake_case__ :Optional[Any] = prediction_length
snake_case__ :str = context_length or prediction_length
snake_case__ :Dict = distribution_output
snake_case__ :Tuple = loss
snake_case__ :Optional[int] = input_size
snake_case__ :Union[str, Any] = num_time_features
snake_case__ :List[Any] = lags_sequence
snake_case__ :Union[str, Any] = scaling
snake_case__ :Union[str, Any] = num_dynamic_real_features
snake_case__ :List[str] = num_static_real_features
snake_case__ :List[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
snake_case__ :Any = cardinality
else:
snake_case__ :Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
snake_case__ :Optional[Any] = embedding_dimension
else:
snake_case__ :str = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
snake_case__ :Union[str, Any] = num_parallel_samples
# Transformer architecture configuration
snake_case__ :int = input_size * len(UpperCamelCase ) + self._number_of_features
snake_case__ :List[str] = d_model
snake_case__ :Any = encoder_attention_heads
snake_case__ :Dict = decoder_attention_heads
snake_case__ :Any = encoder_ffn_dim
snake_case__ :Tuple = decoder_ffn_dim
snake_case__ :str = encoder_layers
snake_case__ :int = decoder_layers
snake_case__ :Any = dropout
snake_case__ :Dict = attention_dropout
snake_case__ :Dict = activation_dropout
snake_case__ :int = encoder_layerdrop
snake_case__ :Union[str, Any] = decoder_layerdrop
snake_case__ :Optional[Any] = activation_function
snake_case__ :Tuple = init_std
snake_case__ :Optional[int] = use_cache
super().__init__(is_encoder_decoder=UpperCamelCase ,**UpperCamelCase )
@property
def lowerCAmelCase_ ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
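# Sketch of the resulting encoder input width with every optional feature count
# at its zero default: input_size * len(lags_sequence) + _number_of_features
# = 1 * 7 + 2 = 9, where the 2 extra channels are the log1p(abs(loc)) and
# log(scale) features noted in the property above.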
| 57
|
def lowercase_ ( __snake_case : str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__snake_case ) )
if txt[a].isalpha()
]
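# Example: for "ab" the result is ["Ab", "aB"]; non-alphabetic positions are skipped.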
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase : str = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class _snake_case ( _A ):
_A = 'rwkv'
_A = {'max_position_embeddings': 'context_length'}
def __init__( self ,UpperCamelCase=50_277 ,UpperCamelCase=1_024 ,UpperCamelCase=4_096 ,UpperCamelCase=32 ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=1E-5 ,UpperCamelCase=0 ,UpperCamelCase=0 ,UpperCamelCase=6 ,UpperCamelCase=False ,UpperCamelCase=True ,**UpperCamelCase ,) -> Dict:
snake_case__ :Dict = vocab_size
snake_case__ :Optional[int] = context_length
snake_case__ :int = hidden_size
snake_case__ :Optional[Any] = num_hidden_layers
snake_case__ :Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
snake_case__ :List[Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
snake_case__ :List[str] = layer_norm_epsilon
snake_case__ :Tuple = rescale_every
snake_case__ :Optional[int] = use_cache
snake_case__ :Dict = bos_token_id
snake_case__ :List[str] = eos_token_id
super().__init__(
tie_word_embeddings=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,**UpperCamelCase )
| 57
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
'''simple docstring'''
snake_case__ :int = 3
snake_case__ :int = 0
while a < n:
        # multiples of 15 already satisfy `a % 3 == 0`, so no separate
        # `elif a % 15 == 0` branch is needed (it would be unreachable)
        if a % 3 == 0 or a % 5 == 0:
            result += a
a += 1
return result
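# Sanity check: with the default n = 1_000 this is Project Euler #1, and the sum
# of all multiples of 3 or 5 below 1000 is 233168.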
if __name__ == "__main__":
print(F'''{solution() = }''')
| 57
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase : Optional[int] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__UpperCAmelCase : Tuple = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
@lru_cache()
def lowercase_ ( ) -> int:
'''simple docstring'''
snake_case__ :Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ :Union[str, Any] = bs[:]
snake_case__ :Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__snake_case )
cs.append(2**8 + n )
n += 1
snake_case__ :List[str] = [chr(__snake_case ) for n in cs]
return dict(zip(__snake_case , __snake_case ) )
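# Sketch of the byte-to-unicode table built above: the 188 "printable" bytes map to
# themselves (e.g. b"!" -> "!"), while the remaining 68 bytes are shifted to code
# points 256 and up, so every byte value gets a distinct, visible unicode character.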
def lowercase_ ( __snake_case : int ) -> Any:
'''simple docstring'''
snake_case__ :Optional[Any] = set()
snake_case__ :Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ :Tuple = char
return pairs
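# Worked example: for the word ("h", "e", "l", "l", "o") the pair-extraction helper
# above returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, i.e. the adjacent
# symbol pairs that drive BPE merge selection.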
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'attention_mask']
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase="replace" ,UpperCamelCase="<s>" ,UpperCamelCase="</s>" ,UpperCamelCase="</s>" ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase=False ,**UpperCamelCase ,) -> Optional[int]:
snake_case__ :str = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else bos_token
snake_case__ :int = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else eos_token
snake_case__ :Any = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else sep_token
snake_case__ :List[str] = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else cls_token
snake_case__ :Tuple = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else unk_token
snake_case__ :Dict = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ :List[Any] = AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ) if isinstance(UpperCamelCase ,UpperCamelCase ) else mask_token
super().__init__(
errors=UpperCamelCase ,bos_token=UpperCamelCase ,eos_token=UpperCamelCase ,unk_token=UpperCamelCase ,sep_token=UpperCamelCase ,cls_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,add_prefix_space=UpperCamelCase ,**UpperCamelCase ,)
with open(UpperCamelCase ,encoding="utf-8" ) as vocab_handle:
snake_case__ :Optional[int] = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ :Dict = errors # how to handle errors in decoding
snake_case__ :int = bytes_to_unicode()
snake_case__ :List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase ,encoding="utf-8" ) as merges_handle:
snake_case__ :Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
snake_case__ :List[str] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ :Union[str, Any] = dict(zip(UpperCamelCase ,range(len(UpperCamelCase ) ) ) )
snake_case__ :Dict = {}
snake_case__ :int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ :str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowerCAmelCase_ ( self ) -> Any:
return len(self.encoder )
def lowerCAmelCase_ ( self ) -> Optional[int]:
return dict(self.encoder ,**self.added_tokens_encoder )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
if token in self.cache:
return self.cache[token]
snake_case__ :Dict = tuple(UpperCamelCase )
snake_case__ :List[Any] = get_pairs(UpperCamelCase )
if not pairs:
return token
while True:
snake_case__ :List[Any] = min(UpperCamelCase ,key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ :Optional[int] = bigram
snake_case__ :List[str] = []
snake_case__ :Tuple = 0
while i < len(UpperCamelCase ):
try:
snake_case__ :List[str] = word.index(UpperCamelCase ,UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ :List[Any] = j
if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ :Any = tuple(UpperCamelCase )
snake_case__ :Any = new_word
if len(UpperCamelCase ) == 1:
break
else:
snake_case__ :Union[str, Any] = get_pairs(UpperCamelCase )
snake_case__ :Tuple = " ".join(UpperCamelCase )
snake_case__ :Optional[Any] = word
return word
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
snake_case__ :str = []
for token in re.findall(self.pat ,UpperCamelCase ):
snake_case__ :Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
return self.encoder.get(UpperCamelCase ,self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[str]:
return self.decoder.get(UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> int:
snake_case__ :Tuple = "".join(UpperCamelCase )
snake_case__ :Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case__ :List[Any] = os.path.join(
UpperCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ :str = os.path.join(
UpperCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=UpperCamelCase ,ensure_ascii=UpperCamelCase ) + "\n" )
snake_case__ :int = 0
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
snake_case__ :Tuple = token_index
writer.write(" ".join(UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ :Any = [self.cls_token_id]
snake_case__ :Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Union[str, Any] = [self.sep_token_id]
snake_case__ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=False ,**UpperCamelCase ) -> Optional[Any]:
snake_case__ :Tuple = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()):
snake_case__ :Any = " " + text
return (text, kwargs)
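# Usage sketch (hedged summary of the methods above, not extra API): given the
# standard BART `vocab.json`/`merges.txt` files, tokenization proceeds by splitting
# text with the regex pattern, byte-mapping each piece to unicode, merging greedily
# by BPE rank, and finally looking each merged token up in the vocab to get ids.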
| 57
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCamelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case__ :str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__UpperCAmelCase : str = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class _snake_case ( _A ):
_A = 'facebook/nllb-200-distilled-600M'
_A = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
_A = 'translator'
_A = AutoTokenizer
_A = AutoModelForSeqaSeqLM
_A = LANGUAGE_CODES
_A = ['text', 'text', 'text']
_A = ['text']
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
snake_case__ :Optional[Any] = self.lang_to_code[src_lang]
snake_case__ :Union[str, Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase ,return_tensors="pt" ,src_lang=UpperCamelCase ,tgt_lang=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Tuple:
return self.model.generate(**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=UpperCamelCase )
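# Pipeline sketch (hedged): the three methods above implement encode -> forward ->
# decode. A call such as tool("Hello", src_lang="English", tgt_lang="French") would
# (1) map both language names to NLLB codes and build tokenized inputs, (2) run
# seq2seq generation, and (3) decode the first generated sequence; the exact call
# signature is an assumption about the surrounding PipelineTool runtime.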
| 57
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def lowercase_ ( __snake_case : SplitDict ) -> int:
'''simple docstring'''
snake_case__ :Any = split_dict._to_yaml_list()
assert len(__snake_case ) == len(__snake_case )
snake_case__ :int = SplitDict._from_yaml_list(__snake_case )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
snake_case__ :List[Any] = None
# the split name of split_dict takes over the name of the split info object
snake_case__ :List[Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=__snake_case ), SplitInfo(dataset_name="my_dataset" )] )
def lowercase_ ( __snake_case : Dict ) -> Any:
'''simple docstring'''
snake_case__ :Any = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 57
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This check verifies that the fake head request was indeed called
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
        # This check verifies that the fake head request was indeed called
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024; tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Any = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the reloaded tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the reloaded tokenizer is from the CustomTokenizerFast class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
snake_case__ :List[str] = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
        # Can't make an isinstance check because the reloaded tokenizer is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Dict = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ :Optional[int] = Trie()
snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase ,["AB", "C"] )
| 57
| 1
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Tuple=False ) -> List[str]:
'''simple docstring'''
snake_case__ :List[Any] = OmegaConf.load(__snake_case )
if display:
print(yaml.dump(OmegaConf.to_container(__snake_case ) ) )
return config
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : Tuple=None ) -> Optional[int]:
'''simple docstring'''
if conf_path is None:
snake_case__ :str = "./model_checkpoints/vqgan_only.yaml"
snake_case__ :Tuple = load_config(__snake_case , display=__snake_case )
snake_case__ :str = VQModel(**config.model.params )
if ckpt_path is None:
snake_case__ :Union[str, Any] = "./model_checkpoints/vqgan_only.pt"
snake_case__ :List[Any] = torch.load(__snake_case , map_location=__snake_case )
if ".ckpt" in ckpt_path:
snake_case__ :List[Any] = sd["state_dict"]
model.load_state_dict(__snake_case , strict=__snake_case )
model.to(__snake_case )
del sd
return model
def lowercase_ ( __snake_case : int , __snake_case : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case__ , snake_case__ , snake_case__ :Union[str, Any] = model.encode(__snake_case )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
snake_case__ :str = model.decode(__snake_case )
return xrec
def lowercase_ ( __snake_case : int , __snake_case : Dict=False ) -> Tuple:
'''simple docstring'''
snake_case__ , snake_case__ :List[Any] = string.rsplit("." , 1 )
if reload:
snake_case__ :Optional[Any] = importlib.import_module(__snake_case )
importlib.reload(__snake_case )
return getattr(importlib.import_module(__snake_case , package=__snake_case ) , cls )
def lowercase_ ( __snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
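# Hedged sketch of the config shape the helper above consumes; the torch.nn.Linear
# target is a hypothetical illustration, not taken from this file:
#
#     config = {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
#     layer = instantiate_from_config(config)  # -> torch.nn.Linear(in_features=4, out_features=2)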
def lowercase_ ( __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Optional[Any]=True , __snake_case : Any=True ) -> int:
'''simple docstring'''
snake_case__ :str = instantiate_from_config(__snake_case )
if sd is not None:
model.load_state_dict(__snake_case )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Tuple:
'''simple docstring'''
if ckpt:
snake_case__ :int = torch.load(__snake_case , map_location="cpu" )
snake_case__ :Optional[int] = pl_sd["global_step"]
print(F'loaded model from global step {global_step}.' )
else:
snake_case__ :Optional[int] = {"state_dict": None}
snake_case__ :Optional[Any] = None
snake_case__ :Dict = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=__snake_case , eval_mode=__snake_case )["model"]
return model, global_step
| 57
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
    # We also need to keep track of the starting epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
    snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of a training script with per-epoch checkpointing and resume." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 57
| 1
|
from collections import namedtuple
__UpperCAmelCase : List[str] = namedtuple("from_to", "from_ to")
__UpperCAmelCase : int = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def lowercase_ ( __snake_case : float , __snake_case : str , __snake_case : str ) -> float:
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F'Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'
+ ", ".join(__snake_case ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ", ".join(__snake_case ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
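# Worked example: converting 2 litres to gallons goes through cubic metres:
# 2 * 0.001 * 264.172 = 0.528344 gallons.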
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_ ( __snake_case : Node | None ) -> None: # in-order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase_ ( __snake_case : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase_ ( __snake_case : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
snake_case__ :Dict = Node(1 )
snake_case__ :int = Node(2 )
snake_case__ :Optional[Any] = Node(3 )
snake_case__ :Tuple = Node(4 )
snake_case__ :str = Node(5 )
snake_case__ :Optional[Any] = Node(6 )
snake_case__ :List[Any] = Node(7 )
snake_case__ :List[str] = Node(8 )
snake_case__ :Tuple = Node(9 )
print(is_full_binary_tree(__snake_case ) )
print(depth_of_tree(__snake_case ) )
print("Tree is: " )
display(__snake_case )
if __name__ == "__main__":
main()
| 57
| 1
|
from __future__ import annotations
__UpperCAmelCase : str = "Muhammad Umer Farooq"
__UpperCAmelCase : Tuple = "MIT"
__UpperCAmelCase : Union[str, Any] = "1.0.0"
__UpperCAmelCase : Dict = "Muhammad Umer Farooq"
__UpperCAmelCase : List[str] = "contact@muhammadumerfarooq.me"
__UpperCAmelCase : Tuple = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _snake_case ( _A ):
def __init__( self ,UpperCamelCase ) -> None:
super().__init__()
snake_case__ :list[str] = []
snake_case__ :List[Any] = domain
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case__ :List[str] = parse.urljoin(self.domain ,UpperCamelCase )
self.urls.append(UpperCamelCase )
def lowercase_ ( __snake_case : str ) -> str:
'''simple docstring'''
return ".".join(get_sub_domain_name(__snake_case ).split("." )[-2:] )
def lowercase_ ( __snake_case : str ) -> str:
'''simple docstring'''
return parse.urlparse(__snake_case ).netloc
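# Worked example: for "https://sub.github.com/path" the netloc helper returns
# "sub.github.com", and keeping only the last two dot-separated labels gives
# "github.com".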
def lowercase_ ( __snake_case : str = "https://github.com" ) -> list[str]:
'''simple docstring'''
snake_case__ :str = get_domain_name(__snake_case )
# Initialize the parser
snake_case__ :str = Parser(__snake_case )
try:
# Open URL
snake_case__ :Union[str, Any] = requests.get(__snake_case )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
snake_case__ :Optional[Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
snake_case__ :str = requests.get(__snake_case )
# Get the valid email.
snake_case__ :List[Any] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__snake_case )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = emails_from_url("https://github.com")
print(F'''{len(emails)} emails found:''')
print("\n".join(sorted(emails)))
| 57
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCAmelCase : int = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
__UpperCAmelCase : Any = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
__UpperCAmelCase : str = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
__UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
__UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 57
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Optional[Any] = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : Tuple = {
"google/fnet-base": 5_1_2,
"google/fnet-large": 5_1_2,
}
__UpperCAmelCase : int = "▁"
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = ['input_ids', 'token_type_ids']
_A = FNetTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=False ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase="<unk>" ,UpperCamelCase="[SEP]" ,UpperCamelCase="<pad>" ,UpperCamelCase="[CLS]" ,UpperCamelCase="[MASK]" ,**UpperCamelCase ,) -> Optional[Any]:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
snake_case__ :List[str] = (
AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase ,normalized=UpperCamelCase )
if isinstance(UpperCamelCase ,UpperCamelCase )
else mask_token
)
super().__init__(
UpperCamelCase ,tokenizer_file=UpperCamelCase ,do_lower_case=UpperCamelCase ,remove_space=UpperCamelCase ,keep_accents=UpperCamelCase ,unk_token=UpperCamelCase ,sep_token=UpperCamelCase ,pad_token=UpperCamelCase ,cls_token=UpperCamelCase ,mask_token=UpperCamelCase ,**UpperCamelCase ,)
snake_case__ :Optional[Any] = do_lower_case
snake_case__ :int = remove_space
snake_case__ :Optional[Any] = keep_accents
snake_case__ :Any = vocab_file
snake_case__ :Optional[int] = False if not self.vocab_file else True
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Tuple = [self.sep_token_id]
snake_case__ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case__ :List[str] = os.path.join(
UpperCamelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file ,UpperCamelCase )
return (out_vocab_file,)
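# Illustrative layout of the special tokens the helpers above produce (the ids are made
# up; real ids come from the SentencePiece vocabulary):
#   single sequence:  [CLS] A [SEP]          ->  cls + token_ids_a + sep
#   sequence pair:    [CLS] A [SEP] B [SEP]  ->  cls + token_ids_a + sep + token_ids_b + sep
# Token type ids mark [CLS], A and the first [SEP] with 0, and B plus the final [SEP] with 1.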
| 57
|
def apply_table( inp , table ):
    '''simple docstring'''
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ):
    '''simple docstring'''
    return data[1:] + data[0]
def xor( a , b ):
    '''simple docstring'''
    res = ""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s , data ):
    '''simple docstring'''
    row = int("0b" + data[0] + data[-1] , 2 )
    col = int("0b" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
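# Worked example for the S-box lookup above: for data = "1011", the outer bits give
# row = int("0b" + "1" + "1", 2) = 3 and the inner bits give col = int("0b" + "01", 2) = 1,
# so the lookup returns bin(s[3][1])[2:].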
def function( expansion , sa , sb , key , message ):
    '''simple docstring'''
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(sa , temp[:4] )  # noqa: E741
    r = apply_sbox(sb , temp[4:] )
    l = "0" * (2 - len(l )) + l  # noqa: E741
    r = "0" * (2 - len(r )) + r
    # p4_table comes from the __main__ block below, as in the original script
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    sb = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sb, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sb, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 57
| 1
|
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
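# Worked example: with the sample scores [90, 23, 6, 33, 21, 65, 123, 34423] (height = 3),
# the leaf pairs reduce under max to [90, 33, 65, 34423], then under min to [33, 65], and
# the root max picks 65, which is what main() below prints.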
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
    @register_to_config
    def __init__( self ,max_length ,vocab_size ,d_model ,dropout_rate ,num_layers ,num_heads ,d_kv ,d_ff ,feed_forward_proj ,is_decoder = False ,) -> None:
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size ,d_model )
        self.position_encoding = nn.Embedding(max_length ,d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size ,d_model=d_model ,num_heads=num_heads ,d_kv=d_kv ,d_ff=d_ff ,dropout_rate=dropout_rate ,feed_forward_proj=feed_forward_proj ,is_decoder=is_decoder ,is_encoder_decoder=False ,)
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self ,encoder_input_tokens ,encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length ,device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask ,input_shape )
        for lyr in self.encoders:
            x = lyr(x ,extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
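# Shape sketch (a reading of the forward pass above, not an API guarantee):
# encoder_input_tokens (batch, seq_len) int ids -> token + learned position embeddings
# -> dropout -> num_layers TaBlock layers under the extended attention mask
# -> TaLayerNorm -> dropout, returning ((batch, seq_len, d_model), encoder_inputs_mask).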
| 57
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _snake_case ( _A ):
    def create_and_test_config_common_properties( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,"tf_padding" ) )
        self.parent.assertTrue(hasattr(config ,"depth_multiplier" ) )
class _snake_case :
def __init__( self ,UpperCamelCase ,UpperCamelCase=13 ,UpperCamelCase=3 ,UpperCamelCase=32 ,UpperCamelCase=0.25 ,UpperCamelCase=8 ,UpperCamelCase=8 ,UpperCamelCase=6 ,UpperCamelCase=32 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase="relu6" ,UpperCamelCase=1_280 ,UpperCamelCase=0.1 ,UpperCamelCase=0.02 ,UpperCamelCase=True ,UpperCamelCase=True ,UpperCamelCase=10 ,UpperCamelCase=None ,) -> Any:
snake_case__ :Tuple = parent
snake_case__ :str = batch_size
snake_case__ :Union[str, Any] = num_channels
snake_case__ :Optional[int] = image_size
snake_case__ :str = depth_multiplier
snake_case__ :List[Any] = depth_divisible_by
snake_case__ :Union[str, Any] = min_depth
snake_case__ :Dict = expand_ratio
snake_case__ :Any = tf_padding
snake_case__ :Optional[Any] = output_stride
snake_case__ :Optional[int] = first_layer_is_expansion
snake_case__ :List[Any] = finegrained_output
snake_case__ :str = hidden_act
snake_case__ :List[Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
snake_case__ :Union[str, Any] = classifier_dropout_prob
snake_case__ :Optional[Any] = use_labels
snake_case__ :Optional[Any] = is_training
snake_case__ :Dict = num_labels
snake_case__ :Union[str, Any] = initializer_range
snake_case__ :Dict = scope
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ :List[Any] = None
snake_case__ :Dict = None
if self.use_labels:
snake_case__ :str = ids_tensor([self.batch_size] ,self.num_labels )
snake_case__ :List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
snake_case__ :Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase_ ( self ) -> List[Any]:
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,depth_divisible_by=self.depth_divisible_by ,min_depth=self.min_depth ,expand_ratio=self.expand_ratio ,output_stride=self.output_stride ,first_layer_is_expansion=self.first_layer_is_expansion ,finegrained_output=self.finegrained_output ,hidden_act=self.hidden_act ,tf_padding=self.tf_padding ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Dict = MobileNetVaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Union[str, Any] = model(UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
self.parent.assertEqual(
result.pooler_output.shape ,(self.batch_size, self.last_hidden_size) ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :List[Any] = self.num_labels
snake_case__ :Dict = MobileNetVaForImageClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Union[str, Any] = model(UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :str = self.num_labels
snake_case__ :List[str] = MobileNetVaForSemanticSegmentation(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
snake_case__ :Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
snake_case__ :str = model(UpperCamelCase ,labels=UpperCamelCase )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
    def lowerCAmelCase_ ( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _snake_case ( _A , _A , unittest.TestCase ):
_A = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[str] = MobileNetVaModelTester(self )
snake_case__ :int = MobileNetVaConfigTester(self ,config_class=UpperCamelCase ,has_text_modality=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def lowerCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def lowerCAmelCase_ ( self ) -> List[Any]:
pass
    def lowerCAmelCase_ ( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
def check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
snake_case__ :Union[str, Any] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case__ :str = model(**self._prepare_for_class(UpperCamelCase ,UpperCamelCase ) )
snake_case__ :Dict = outputs.hidden_states
snake_case__ :List[Any] = 16
self.assertEqual(len(UpperCamelCase ) ,UpperCamelCase )
snake_case__ , snake_case__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ :Optional[int] = True
check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ :str = True
check_hidden_states_output(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Tuple:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ :str = MobileNetVaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
snake_case__ :Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self ) -> Any:
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Union[str, Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(UpperCamelCase )
snake_case__ :int = self.default_image_processor
snake_case__ :Any = prepare_img()
snake_case__ :int = image_processor(images=UpperCamelCase ,return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ :Union[str, Any] = model(**UpperCamelCase )
# verify the logits
snake_case__ :Optional[Any] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape ,UpperCamelCase )
snake_case__ :Any = torch.tensor([0.2445, -1.1993, 0.1905] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCamelCase ,atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
snake_case__ :Dict = model.to(UpperCamelCase )
snake_case__ :List[str] = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
snake_case__ :List[str] = prepare_img()
snake_case__ :Optional[Any] = image_processor(images=UpperCamelCase ,return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case__ :Union[str, Any] = model(**UpperCamelCase )
snake_case__ :List[str] = outputs.logits
# verify the logits
snake_case__ :Dict = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape ,UpperCamelCase )
snake_case__ :Union[str, Any] = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] ,device=UpperCamelCase ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,UpperCamelCase ,atol=1E-4 ) )
| 57
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort( start , visited , sort ):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
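# Expected output for the sample graph above: ['c', 'd', 'e', 'b', 'a']. This is a
# post-order of the DFS; reverse it for a conventional topological order (a, b, e, d, c).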
| 57
| 1
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _snake_case ( _A , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    @property
    def gpu_provider( self ) -> Tuple:
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ) -> "ort.SessionOptions":
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case__ :Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case__ :List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" ,revision="onnx" ,safety_checker=UpperCamelCase ,feature_extractor=UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Tuple = "A red cat sitting on a park bench"
snake_case__ :Optional[Any] = np.random.RandomState(0 )
snake_case__ :List[str] = pipe(
prompt=UpperCamelCase ,image=UpperCamelCase ,mask_image=UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=UpperCamelCase ,output_type="np" ,)
snake_case__ :int = output.images
snake_case__ :Optional[int] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case__ :Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
snake_case__ :List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
snake_case__ :Optional[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" ,subfolder="scheduler" ,revision="onnx" )
snake_case__ :Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" ,revision="onnx" ,scheduler=UpperCamelCase ,safety_checker=UpperCamelCase ,feature_extractor=UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Tuple = "A red cat sitting on a park bench"
snake_case__ :Dict = np.random.RandomState(0 )
snake_case__ :Optional[Any] = pipe(
prompt=UpperCamelCase ,image=UpperCamelCase ,mask_image=UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=UpperCamelCase ,output_type="np" ,)
snake_case__ :Tuple = output.images
snake_case__ :str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
snake_case__ :List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 57
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 1
|
def apply_table( inp , table ):
    '''simple docstring'''
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ):
    '''simple docstring'''
    return data[1:] + data[0]
def xor( a , b ):
    '''simple docstring'''
    res = ""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s , data ):
    '''simple docstring'''
    row = int("0b" + data[0] + data[-1] , 2 )
    col = int("0b" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function( expansion , sa , sb , key , message ):
    '''simple docstring'''
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(sa , temp[:4] )  # noqa: E741
    r = apply_sbox(sb , temp[4:] )
    l = "0" * (2 - len(l )) + l  # noqa: E741
    r = "0" * (2 - len(r )) + r
    # p4_table comes from the __main__ block below, as in the original script
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    sa = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    sb = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    keya = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    keyb = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, sa, sb, keya, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keyb, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, sa, sb, keyb, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, sa, sb, keya, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 57
|
def bead_sort( sequence : list ) -> list:
    '''simple docstring'''
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
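# Example trace: for [3, 1] the upper rod holds 2 more beads than the lower one, so
# 2 beads fall and one pass yields [1, 3]; repeated passes let all beads settle.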
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
from __future__ import annotations
def lowercase_ ( nums : list ) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
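# Example: lowercase_([3, 6, 9]) returns 6.0, while lowercase_([]) raises ValueError.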
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str , base_model : bool ) -> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__UpperCAmelCase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
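# Hedged usage sketch; the script name and every path below are placeholders, not shipped files:
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/funnel/model.ckpt \
#       --config_file /tmp/funnel/config.json \
#       --pytorch_dump_path /tmp/funnel/pytorch_model.bin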
| 57
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    '''simple docstring'''
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ):
    '''simple docstring'''
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
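# A minimal sketch of the nearest-cluster lookup above (values are illustrative):
#   pixels = np.array([[[0.0, 0.0, 0.0], [0.9, 0.9, 0.9]]])   # a 1x2 RGB image
#   clusters = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])   # two palette colors
#   color_quantize(pixels, clusters)                           # -> array([0, 1])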
class _snake_case ( _A ):
_A = ['pixel_values']
def __init__( self ,UpperCamelCase = None ,UpperCamelCase = True ,UpperCamelCase = None ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = True ,UpperCamelCase = True ,**UpperCamelCase ,) -> None:
super().__init__(**UpperCamelCase )
snake_case__ :List[Any] = size if size is not None else {"height": 256, "width": 256}
snake_case__ :str = get_size_dict(UpperCamelCase )
snake_case__ :Dict = np.array(UpperCamelCase ) if clusters is not None else None
snake_case__ :str = do_resize
snake_case__ :List[str] = size
snake_case__ :List[Any] = resample
snake_case__ :Union[str, Any] = do_normalize
snake_case__ :int = do_color_quantize
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = PILImageResampling.BILINEAR ,UpperCamelCase = None ,**UpperCamelCase ,) -> np.ndarray:
snake_case__ :List[str] = get_size_dict(UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
UpperCamelCase ,size=(size["height"], size["width"]) ,resample=UpperCamelCase ,data_format=UpperCamelCase ,**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,) -> np.ndarray:
snake_case__ :Tuple = rescale(image=UpperCamelCase ,scale=1 / 127.5 ,data_format=UpperCamelCase )
snake_case__ :List[Any] = image - 1
return image
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = None ,UpperCamelCase = ChannelDimension.FIRST ,**UpperCamelCase ,) -> PIL.Image.Image:
snake_case__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
snake_case__ :int = size if size is not None else self.size
snake_case__ :Tuple = get_size_dict(UpperCamelCase )
snake_case__ :str = resample if resample is not None else self.resample
snake_case__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
snake_case__ :List[Any] = clusters if clusters is not None else self.clusters
snake_case__ :str = np.array(UpperCamelCase )
snake_case__ :int = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
snake_case__ :Union[str, Any] = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
snake_case__ :int = [self.resize(image=UpperCamelCase ,size=UpperCamelCase ,resample=UpperCamelCase ) for image in images]
if do_normalize:
snake_case__ :Any = [self.normalize(image=UpperCamelCase ) for image in images]
if do_color_quantize:
snake_case__ :Optional[Any] = [to_channel_dimension_format(UpperCamelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
snake_case__ :Union[str, Any] = np.array(UpperCamelCase )
snake_case__ :Optional[int] = color_quantize(UpperCamelCase ,UpperCamelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
snake_case__ :List[Any] = images.shape[0]
snake_case__ :str = images.reshape(UpperCamelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
snake_case__ :Any = list(UpperCamelCase )
else:
snake_case__ :List[str] = [to_channel_dimension_format(UpperCamelCase ,UpperCamelCase ) for image in images]
snake_case__ :List[str] = {"input_ids": images}
return BatchFeature(data=UpperCamelCase ,tensor_type=UpperCamelCase )
| 57
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code() -> str:
    '''simple docstring'''
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir( dataset_loading_script_name , dataset_loading_script_code , tmp_path ) -> str:
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F'{script_name}.py'
    with open(script_path , "w" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
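# Usage sketch (hypothetical test, not part of this module): a test requests the fixture
# by name and receives the directory that now contains the generated loading script:
#   def test_dummy_dataset_script(dataset_loading_script_dir):
#       assert dataset_loading_script_dir.endswith("__dummy_dataset1__")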
| 57
| 1
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_flax
class _snake_case :
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> str:
pass
def lowerCAmelCase_ ( self ) -> str:
pass
def lowerCAmelCase_ ( self ) -> List[str]:
pass
    def assert_almost_equals( self ,a ,b ,tol ):
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff ,tol ,f'Difference between torch and flax is {diff} (>= {tol}).' )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> Dict:
snake_case__ :Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :List[str] = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Dict = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> List[Any]:
snake_case__ , snake_case__ :Any = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Optional[int] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :str = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
self.assertEqual(output["text_embeds"].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> Union[str, Any]:
snake_case__ , snake_case__ :List[Any] = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Optional[int] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :Any = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
snake_case__ :Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase )
snake_case__ :Any = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase )
snake_case__ :Any = model(input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase )
snake_case__ :List[str] = after_output[0]
snake_case__ :Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase ,1E-3 )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,**UpperCamelCase ) -> int:
snake_case__ , snake_case__ :List[Any] = self.get_vision_text_model(UpperCamelCase ,UpperCamelCase )
snake_case__ :Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
snake_case__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase )
snake_case__ :int = model(
input_ids=UpperCamelCase ,pixel_values=UpperCamelCase ,attention_mask=UpperCamelCase ,output_attentions=UpperCamelCase )
snake_case__ :Tuple = output.vision_model_output.attentions
self.assertEqual(len(UpperCamelCase ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ :Optional[Any] = to_atuple(vision_model.config.image_size )
snake_case__ :int = to_atuple(vision_model.config.patch_size )
snake_case__ :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
snake_case__ :List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
snake_case__ :Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(UpperCamelCase ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Union[str, Any]:
pt_model.to(UpperCamelCase )
pt_model.eval()
# prepare inputs
snake_case__ :Optional[int] = inputs_dict
snake_case__ :List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
snake_case__ :str = pt_model(**UpperCamelCase ).to_tuple()
snake_case__ :Optional[Any] = fx_model(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCamelCase )
snake_case__ :Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase ,from_pt=UpperCamelCase )
snake_case__ :str = fx_model_loaded(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase ,from_flax=UpperCamelCase )
pt_model_loaded.to(UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
snake_case__ :Tuple = pt_model_loaded(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) ,len(UpperCamelCase ) ,"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(UpperCamelCase ,pt_output_loaded.numpy() ,4E-2 )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :str = VisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :int = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,UpperCamelCase )
snake_case__ :int = fx_state
self.check_pt_flax_equivalence(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
snake_case__ :Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = VisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :Union[str, Any] = FlaxVisionTextDualEncoderModel(UpperCamelCase )
snake_case__ :List[Any] = load_flax_weights_in_pytorch_model(UpperCamelCase ,fx_model.params )
self.check_pt_flax_equivalence(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**UpperCamelCase )
@is_pt_flax_cross_test
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[Any] = self.prepare_config_and_inputs()
snake_case__ :Optional[int] = config_inputs_dict.pop("vision_config" )
snake_case__ :Dict = config_inputs_dict.pop("text_config" )
snake_case__ :Optional[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
self.check_equivalence_flax_to_pt(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
@slow
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :Dict = self.get_pretrained_model_and_inputs()
snake_case__ :List[Any] = model_a(**UpperCamelCase )
snake_case__ :Optional[int] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(UpperCamelCase )
snake_case__ :Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase )
snake_case__ :List[str] = model_a(**UpperCamelCase )
snake_case__ :Tuple = after_outputs[0]
snake_case__ :Optional[int] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(UpperCamelCase ,1E-5 )
@require_flax
class _snake_case ( _A , unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" ,"hf-internal-testing/tiny-bert" ,vision_from_pt=UpperCamelCase ,text_from_pt=UpperCamelCase ,)
snake_case__ :Union[str, Any] = 13
snake_case__ :List[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
snake_case__ :Union[str, Any] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
snake_case__ :Optional[int] = random_attention_mask([batch_size, 4] )
snake_case__ :Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> List[str]:
snake_case__ :Dict = FlaxViTModel(UpperCamelCase )
snake_case__ :Tuple = FlaxBertModel(UpperCamelCase )
return vision_model, text_model
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[int] = FlaxViTModelTester(self )
snake_case__ :Optional[Any] = FlaxBertModelTester(self )
snake_case__ :Dict = vit_model_tester.prepare_config_and_inputs()
snake_case__ :int = bert_model_tester.prepare_config_and_inputs()
snake_case__ , snake_case__ :int = vision_config_and_inputs
snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _snake_case ( _A , unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" ,"hf-internal-testing/tiny-bert" ,vision_from_pt=UpperCamelCase ,text_from_pt=UpperCamelCase ,)
snake_case__ :List[Any] = 13
snake_case__ :Dict = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
snake_case__ :Union[str, Any] = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
snake_case__ :Optional[Any] = random_attention_mask([batch_size, 4] )
snake_case__ :Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :Any = FlaxCLIPVisionModel(UpperCamelCase )
snake_case__ :Union[str, Any] = FlaxBertModel(UpperCamelCase )
return vision_model, text_model
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Optional[Any] = FlaxCLIPVisionModelTester(self )
snake_case__ :Optional[int] = FlaxBertModelTester(self )
snake_case__ :Tuple = clip_model_tester.prepare_config_and_inputs()
snake_case__ :int = bert_model_tester.prepare_config_and_inputs()
snake_case__ , snake_case__ :Optional[int] = vision_config_and_inputs
snake_case__ , snake_case__ , snake_case__ , snake_case__ :Optional[Any] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Dict = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" ,logit_scale_init_value=1.0 )
snake_case__ :Any = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
snake_case__ :Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
snake_case__ :Tuple = processor(
text=["una foto di un gatto", "una foto di un cane"] ,images=UpperCamelCase ,padding=UpperCamelCase ,return_tensors="np" )
snake_case__ :List[Any] = model(**UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
snake_case__ :Dict = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image ,UpperCamelCase ,atol=1E-3 ) )
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 57
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( __snake_case : int , __snake_case : Tuple , __snake_case : str , __snake_case : int ) -> Dict:
'''simple docstring'''
snake_case__ :Tuple = BigBirdConfig.from_json_file(__snake_case )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
snake_case__ :Union[str, Any] = BigBirdForQuestionAnswering(__snake_case )
else:
snake_case__ :List[str] = BigBirdForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__snake_case , __snake_case , is_trivia_qa=__snake_case )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
__UpperCAmelCase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
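    # Illustrative invocation (the script name and all paths below are
    # placeholders, not taken from the file above):
    #   python convert_bigbird_checkpoint.py \
    #       --tf_checkpoint_path /path/to/tf_ckpt \
    #       --big_bird_config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/pytorch_model \
    #       --is_trivia_qa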
| 57
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
__UpperCAmelCase : Dict = True
except ImportError:
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( __snake_case : Namespace ) -> Dict:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _snake_case ( _A ):
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Any:
snake_case__ :Dict = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing" ,action="store_true" ,help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file" ,type=UpperCamelCase ,help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path" ,type=UpperCamelCase ,help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCamelCase )
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase=None ,*UpperCamelCase ) -> Any:
snake_case__ :Union[str, Any] = testing
snake_case__ :Union[str, Any] = testing_file
snake_case__ :List[str] = path
def lowerCAmelCase_ ( self ) -> List[Any]:
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case__ :Tuple = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
if len(UpperCamelCase ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
snake_case__ :str = (
Path(UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case__ :Tuple = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase ) )
else:
with open(self._testing_file ,"r" ) as configuration_file:
snake_case__ :str = json.load(UpperCamelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=UpperCamelCase ,extra_context=UpperCamelCase ,)
snake_case__ :List[Any] = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
# Retrieve configuration
with open(directory + "/configuration.json" ,"r" ) as configuration_file:
snake_case__ :Dict = json.load(UpperCamelCase )
snake_case__ :Optional[Any] = configuration["lowercase_modelname"]
snake_case__ :List[Any] = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(f'{directory}/configuration.json' )
snake_case__ :Any = "PyTorch" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "TensorFlow" in generate_tensorflow_pytorch_and_flax
snake_case__ :Any = "Flax" in generate_tensorflow_pytorch_and_flax
snake_case__ :Dict = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(UpperCamelCase ,exist_ok=UpperCamelCase )
os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=UpperCamelCase )
# Tests require submodules as they have parent imports
with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,"w" ):
pass
shutil.move(
f'{directory}/__init__.py' ,f'{model_dir}/__init__.py' ,)
shutil.move(
f'{directory}/configuration_{lowercase_model_name}.py' ,f'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(UpperCamelCase ):
with open(UpperCamelCase ,"r" ) as f:
snake_case__ :List[str] = f.readlines()
with open(UpperCamelCase ,"w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_{lowercase_model_name}.py' ,f'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_tf_{lowercase_model_name}.py' ,f'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/modeling_flax_{lowercase_model_name}.py' ,f'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
f'{directory}/{lowercase_model_name}.md' ,f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
f'{directory}/tokenization_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
f'{directory}/tokenization_fast_{lowercase_model_name}.py' ,f'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
# Create temp file
snake_case__ , snake_case__ :Optional[Any] = mkstemp()
snake_case__ :Optional[Any] = False
with fdopen(UpperCamelCase ,"w" ) as new_file:
with open(UpperCamelCase ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase )
if line_to_copy_below in line:
snake_case__ :Optional[Any] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase )
if not line_found:
raise ValueError(f'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase ,UpperCamelCase )
# Remove original file
remove(UpperCamelCase )
# Move new file
move(UpperCamelCase ,UpperCamelCase )
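        # The replace() helper above follows the standard safe-rewrite pattern:
        # the new content is fully written to a mkstemp() file and given the old
        # file's permissions before it takes the original file's place, so a
        # half-written target is never left behind.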
def skip_units(UpperCamelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase ):
with open(UpperCamelCase ) as datafile:
snake_case__ :int = []
snake_case__ :Optional[int] = False
snake_case__ :List[str] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :Tuple = skip_units(UpperCamelCase )
elif "# Below: " in line and "##" not in line:
snake_case__ :Optional[Any] = line.split("\"" )[1]
snake_case__ :List[str] = skip_units(UpperCamelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
snake_case__ :Tuple = []
elif "# Replace with" in line and "##" not in line:
snake_case__ :Optional[Any] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase )
remove(UpperCamelCase )
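        # The to_replace_* template consumed here follows a simple marker
        # protocol, roughly (illustrative content, not a real template):
        #   # To replace in: "path/to/target.py"
        #   # Below: "anchor line to copy under"
        #   lines to be copied ...
        #   # End.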
replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(UpperCamelCase )
| 57
| 1
|
from __future__ import annotations
def lowercase_ ( __snake_case : list[float] ) -> float:
'''simple docstring'''
snake_case__ :str = 0.0_0
snake_case__ :int = 0
for resistor in resistors:
if resistor <= 0:
snake_case__ :int = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(__snake_case )
first_sum += 1 / float(__snake_case )
index += 1
return 1 / first_sum
def lowercase_ ( __snake_case : list[float] ) -> float:
'''simple docstring'''
snake_case__ :List[Any] = 0.0_0
snake_case__ :Dict = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
snake_case__ :int = F'Resistor at index {index} has a negative value!'
raise ValueError(__snake_case )
index += 1
return sum_r
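# Worked example: for resistors [2.0, 4.0, 4.0] the series combination is
# 2.0 + 4.0 + 4.0 = 10.0 ohms, while the parallel combination is
# 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm.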
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
__UpperCAmelCase : str = {"allegro/herbert-base-cased": 5_1_4}
__UpperCAmelCase : List[str] = {}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_INIT_CONFIGURATION
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = HerbertTokenizer
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<pad>" ,UpperCamelCase="<mask>" ,UpperCamelCase="</s>" ,**UpperCamelCase ,) -> Dict:
super().__init__(
UpperCamelCase ,UpperCamelCase ,tokenizer_file=UpperCamelCase ,cls_token=UpperCamelCase ,unk_token=UpperCamelCase ,pad_token=UpperCamelCase ,mask_token=UpperCamelCase ,sep_token=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Optional[int] = [self.cls_token_id]
snake_case__ :Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase ,token_ids_a=UpperCamelCase ,already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> List[int]:
snake_case__ :Any = [self.sep_token_id]
snake_case__ :Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
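    # Resulting layout (token names follow the defaults in __init__):
    #   single sequence: "<s> A </s>"         -> token_type_ids all 0
    #   sequence pair:   "<s> A </s> B </s>"  -> 0s through the first "</s>",
    #                                            then 1s for "B </s>"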
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
snake_case__ :List[str] = self._tokenizer.model.save(UpperCamelCase ,name=UpperCamelCase )
return tuple(UpperCamelCase )
| 57
| 1
|
import logging
from transformers import PretrainedConfig
__UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
__UpperCAmelCase : List[str] = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class _snake_case ( _A ):
_A = 'bertabs'
def __init__( self ,UpperCamelCase=30_522 ,UpperCamelCase=512 ,UpperCamelCase=6 ,UpperCamelCase=512 ,UpperCamelCase=8 ,UpperCamelCase=512 ,UpperCamelCase=0.2 ,UpperCamelCase=6 ,UpperCamelCase=768 ,UpperCamelCase=8 ,UpperCamelCase=2_048 ,UpperCamelCase=0.2 ,**UpperCamelCase ,) -> Optional[int]:
super().__init__(**UpperCamelCase )
snake_case__ :int = vocab_size
snake_case__ :Optional[Any] = max_pos
snake_case__ :List[Any] = enc_layers
snake_case__ :List[str] = enc_hidden_size
snake_case__ :str = enc_heads
snake_case__ :Optional[Any] = enc_ff_size
snake_case__ :Any = enc_dropout
snake_case__ :int = dec_layers
snake_case__ :List[str] = dec_hidden_size
snake_case__ :Dict = dec_heads
snake_case__ :Any = dec_ff_size
snake_case__ :str = dec_dropout
| 57
|
def lowercase_ ( __snake_case : int ) -> bool:
'''simple docstring'''
if p < 2:
raise ValueError("p should not be less than 2!" )
elif p == 2:
return True
snake_case__ :List[str] = 4
snake_case__ :Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
snake_case__ :List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
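    # Sanity check (assuming the test function is reachable under the name used
    # above): running it over all odd primes p < 100 flags exactly the known
    # Mersenne exponents 3, 5, 7, 13, 17, 19, 31, 61 and 89.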
| 57
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( _A , unittest.TestCase ):
_A = KandinskyVaaControlnetImgaImgPipeline
_A = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_A = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
_A = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_A = False
@property
def lowerCAmelCase_ ( self ) -> Tuple:
return 32
@property
def lowerCAmelCase_ ( self ) -> str:
return 32
@property
def lowerCAmelCase_ ( self ) -> List[str]:
return self.time_input_dim
@property
def lowerCAmelCase_ ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self ) -> List[Any]:
return 100
@property
def lowerCAmelCase_ ( self ) -> Dict:
torch.manual_seed(0 )
snake_case__ :Optional[Any] = {
"in_channels": 8,
            # out_channels is twice in_channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
snake_case__ :List[Any] = UNetaDConditionModel(**UpperCamelCase )
return model
@property
def lowerCAmelCase_ ( self ) -> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
snake_case__ :Dict = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :str = self.dummy_unet
snake_case__ :List[Any] = self.dummy_movq
snake_case__ :List[Any] = {
"num_train_timesteps": 1_000,
"beta_schedule": "linear",
"beta_start": 0.00085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
snake_case__ :List[str] = DDIMScheduler(**UpperCamelCase )
snake_case__ :Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Optional[int]:
snake_case__ :Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ :int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
UpperCamelCase )
# create init_image
snake_case__ :Optional[int] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
snake_case__ :List[str] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
snake_case__ :Any = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("RGB" ).resize((256, 256) )
# create hint
snake_case__ :int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase )
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :int = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :Tuple = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :List[Any] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Dict = "cpu"
snake_case__ :Union[str, Any] = self.get_dummy_components()
snake_case__ :Dict = self.pipeline_class(**UpperCamelCase )
snake_case__ :Union[str, Any] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Optional[int] = pipe(**self.get_dummy_inputs(UpperCamelCase ) )
snake_case__ :Dict = output.images
snake_case__ :List[Any] = pipe(
**self.get_dummy_inputs(UpperCamelCase ) ,return_dict=UpperCamelCase ,)[0]
snake_case__ :Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ :Union[str, Any] = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
snake_case__ :str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
snake_case__ :Any = init_image.resize((512, 512) )
snake_case__ :int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
snake_case__ :Tuple = torch.from_numpy(np.array(UpperCamelCase ) ).float() / 255.0
snake_case__ :List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
snake_case__ :Dict = "A robot, 4k photo"
snake_case__ :Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" ,torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase )
snake_case__ :Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" ,torch_dtype=torch.floataa )
snake_case__ :Optional[Any] = pipeline.to(UpperCamelCase )
pipeline.set_progress_bar_config(disable=UpperCamelCase )
snake_case__ :Dict = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case__ , snake_case__ :Dict = pipe_prior(
UpperCamelCase ,image=UpperCamelCase ,strength=0.85 ,generator=UpperCamelCase ,negative_prompt="" ,).to_tuple()
snake_case__ :Union[str, Any] = pipeline(
image=UpperCamelCase ,image_embeds=UpperCamelCase ,negative_image_embeds=UpperCamelCase ,hint=UpperCamelCase ,generator=UpperCamelCase ,num_inference_steps=100 ,height=512 ,width=512 ,strength=0.5 ,output_type="np" ,)
snake_case__ :Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase ,UpperCamelCase )
| 57
|
from typing import Any
def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : dict , __snake_case : dict , __snake_case : dict , ) -> list:
'''simple docstring'''
_validation(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
# Creates data structures and fill initial step
snake_case__ :dict = {}
snake_case__ :dict = {}
for state in states_space:
snake_case__ :List[Any] = observations_space[0]
snake_case__ :str = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case__ :str = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__snake_case ) ):
snake_case__ :Any = observations_space[o]
snake_case__ :Tuple = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case__ :Tuple = ""
snake_case__ :Union[str, Any] = -1
for k_state in states_space:
snake_case__ :int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case__ :str = probability
snake_case__ :Tuple = k_state
# Update probabilities and pointers dicts
snake_case__ :List[str] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case__ :List[str] = arg_max
# The final observation
snake_case__ :str = observations_space[len(__snake_case ) - 1]
# argmax for given final observation
snake_case__ :Optional[int] = ""
snake_case__ :List[str] = -1
for k_state in states_space:
snake_case__ :List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case__ :List[str] = probability
snake_case__ :int = k_state
snake_case__ :Any = arg_max
# Process pointers backwards
snake_case__ :int = last_state
snake_case__ :List[str] = []
for o in range(len(__snake_case ) - 1 , -1 , -1 ):
result.append(__snake_case )
snake_case__ :List[str] = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , )
_validate_lists(__snake_case , __snake_case )
_validate_dicts(
__snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowercase_ ( __snake_case : Any , __snake_case : Any ) -> None:
'''simple docstring'''
_validate_list(__snake_case , "observations_space" )
_validate_list(__snake_case , "states_space" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :Optional[int] = F'{var_name} must be a list'
raise ValueError(__snake_case )
else:
for x in _object:
if not isinstance(__snake_case , __snake_case ):
snake_case__ :Any = F'{var_name} must be a list of strings'
raise ValueError(__snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : Any , __snake_case : Any , ) -> None:
'''simple docstring'''
_validate_dict(__snake_case , "initial_probabilities" , __snake_case )
_validate_nested_dict(__snake_case , "transition_probabilities" )
_validate_nested_dict(__snake_case , "emission_probabilities" )
def lowercase_ ( __snake_case : Any , __snake_case : str ) -> None:
'''simple docstring'''
_validate_dict(_object , __snake_case , __snake_case )
for x in _object.values():
_validate_dict(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( __snake_case : Any , __snake_case : str , __snake_case : type , __snake_case : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , __snake_case ):
snake_case__ :str = F'{var_name} must be a dict'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object ):
snake_case__ :List[Any] = F'{var_name} all keys must be strings'
raise ValueError(__snake_case )
if not all(isinstance(__snake_case , __snake_case ) for x in _object.values() ):
snake_case__ :Optional[int] = "nested dictionary " if nested else ""
snake_case__ :int = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(__snake_case )
if __name__ == "__main__":
from doctest import testmod
testmod()
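    # A minimal input sketch for the routine above (the classic healthy/fever
    # HMM; the callable name `viterbi` is an assumption, since the original
    # definition is obfuscated):
    #   observations = ["normal", "cold", "dizzy"]
    #   states = ["Healthy", "Fever"]
    #   start_p = {"Healthy": 0.6, "Fever": 0.4}
    #   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
    #              "Fever": {"Healthy": 0.4, "Fever": 0.6}}
    #   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    #             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
    #   viterbi(observations, states, start_p, trans_p, emit_p)
    #   # -> ["Healthy", "Healthy", "Fever"]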
| 57
| 1
|
import inspect
import unittest
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def lowerCAmelCase_ ( self ) -> Any:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case__ :int = inspect.getmembers(UpperCamelCase ,inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case__ :Optional[Any] = "k-diffusion"
elif backend == "invisible_watermark":
snake_case__ :Union[str, Any] = "invisible-watermark"
assert backend in deps, f'{backend} is not in the deps table!'
| 57
|
def lowercase_ ( __snake_case : str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(__snake_case ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__("doctest").testmod()
| 57
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
snake_case__ :List[Any] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert ! ("I love camembert!", the encoded input sentence)
snake_case__ :Optional[int] = model(UpperCamelCase )["last_hidden_state"]
snake_case__ :Optional[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,UpperCamelCase )
# compare the actual values for a slice.
snake_case__ :Optional[int] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 57
|
def lowercase_ ( __snake_case : int = 10_00 ) -> int:
'''simple docstring'''
snake_case__ :int = 3
snake_case__ :int = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            # this also covers multiples of 15, which divide by both 3 and 5
            result += a
        a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
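    # Reference value: for the default n = 1000 the sum of all multiples of
    # 3 or 5 below n is 233168 (Project Euler problem 1).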
| 57
| 1
|
import pytest
__UpperCAmelCase : int = "__dummy_dataset1__"
__UpperCAmelCase : int = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowercase_ ( ) -> Optional[int]:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowercase_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Any ) -> Dict:
'''simple docstring'''
snake_case__ :Optional[Any] = dataset_loading_script_name
snake_case__ :Optional[Any] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__snake_case )
snake_case__ :List[Any] = script_dir / F'{script_name}.py'
with open(__snake_case , "w" ) as f:
f.write(__snake_case )
return str(__snake_case )
| 57
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCamelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case__ :str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _snake_case ( _A ):
_A = 42
class _snake_case ( _A , _A ):
_A = True
@register_to_config
def __init__( self ,UpperCamelCase = 3 ,UpperCamelCase = 3 ,UpperCamelCase = ("DownEncoderBlock2D",) ,UpperCamelCase = ("UpDecoderBlock2D",) ,UpperCamelCase = (64,) ,UpperCamelCase = 1 ,UpperCamelCase = "silu" ,UpperCamelCase = 4 ,UpperCamelCase = 32 ,UpperCamelCase = 32 ,UpperCamelCase = 0.18215 ,) -> int:
super().__init__()
# pass init params to Encoder
snake_case__ :Union[str, Any] = Encoder(
in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,down_block_types=UpperCamelCase ,block_out_channels=UpperCamelCase ,layers_per_block=UpperCamelCase ,act_fn=UpperCamelCase ,norm_num_groups=UpperCamelCase ,double_z=UpperCamelCase ,)
# pass init params to Decoder
snake_case__ :int = Decoder(
in_channels=UpperCamelCase ,out_channels=UpperCamelCase ,up_block_types=UpperCamelCase ,block_out_channels=UpperCamelCase ,layers_per_block=UpperCamelCase ,norm_num_groups=UpperCamelCase ,act_fn=UpperCamelCase ,)
snake_case__ :Any = nn.Convad(2 * latent_channels ,2 * latent_channels ,1 )
snake_case__ :List[Any] = nn.Convad(UpperCamelCase ,UpperCamelCase ,1 )
snake_case__ :List[str] = False
snake_case__ :List[str] = False
# only relevant if vae tiling is enabled
snake_case__ :str = self.config.sample_size
snake_case__ :Optional[Any] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size ,(list, tuple) )
else self.config.sample_size
)
snake_case__ :str = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
snake_case__ :int = 0.25
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=False ) -> str:
if isinstance(UpperCamelCase ,(Encoder, Decoder) ):
snake_case__ :List[str] = value
def lowerCAmelCase_ ( self ,UpperCamelCase = True ) -> List[str]:
snake_case__ :Any = use_tiling
def lowerCAmelCase_ ( self ) -> List[str]:
self.enable_tiling(UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = True
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCAmelCase_ ( self ) -> Dict[str, AttentionProcessor]:
snake_case__ :List[Any] = {}
def fn_recursive_add_processors(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
if hasattr(UpperCamelCase ,"set_processor" ):
snake_case__ :int = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' ,UpperCamelCase ,UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
return processors
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :List[str] = len(self.attn_processors.keys() )
if isinstance(UpperCamelCase ,UpperCamelCase ) and len(UpperCamelCase ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(UpperCamelCase )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ):
if hasattr(UpperCamelCase ,"set_processor" ):
if not isinstance(UpperCamelCase ,UpperCamelCase ):
module.set_processor(UpperCamelCase )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' ,UpperCamelCase ,UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase ,UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase ,return_dict=UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
snake_case__ :Optional[int] = [self.encoder(UpperCamelCase ) for x_slice in x.split(1 )]
snake_case__ :Union[str, Any] = torch.cat(UpperCamelCase )
else:
snake_case__ :Any = self.encoder(UpperCamelCase )
snake_case__ :str = self.quant_conv(UpperCamelCase )
snake_case__ :Dict = DiagonalGaussianDistribution(UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase ,return_dict=UpperCamelCase )
snake_case__ :Dict = self.post_quant_conv(UpperCamelCase )
snake_case__ :List[str] = self.decoder(UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
@apply_forward_hook
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
snake_case__ :str = [self._decode(UpperCamelCase ).sample for z_slice in z.split(1 )]
snake_case__ :Any = torch.cat(UpperCamelCase )
else:
snake_case__ :List[Any] = self._decode(UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :int = min(a.shape[2] ,b.shape[2] ,UpperCamelCase )
for y in range(UpperCamelCase ):
snake_case__ :str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
snake_case__ :Union[str, Any] = min(a.shape[3] ,b.shape[3] ,UpperCamelCase )
for x in range(UpperCamelCase ):
snake_case__ :int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
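    # Both blend helpers above apply a linear crossfade,
    # a * (1 - t) + b * t with t ramping from 0 to 1 over `blend_extent`
    # rows (blend_v) or columns (blend_h), which is what hides the seams
    # between adjacent tiles.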
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> AutoencoderKLOutput:
snake_case__ :int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
snake_case__ :Optional[int] = int(self.tile_latent_min_size * self.tile_overlap_factor )
snake_case__ :Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
snake_case__ :List[Any] = []
for i in range(0 ,x.shape[2] ,UpperCamelCase ):
snake_case__ :List[str] = []
for j in range(0 ,x.shape[3] ,UpperCamelCase ):
snake_case__ :Union[str, Any] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
snake_case__ :List[Any] = self.encoder(UpperCamelCase )
snake_case__ :Optional[Any] = self.quant_conv(UpperCamelCase )
row.append(UpperCamelCase )
rows.append(UpperCamelCase )
snake_case__ :Tuple = []
for i, row in enumerate(UpperCamelCase ):
snake_case__ :Tuple = []
for j, tile in enumerate(UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case__ :List[Any] = self.blend_v(rows[i - 1][j] ,UpperCamelCase ,UpperCamelCase )
if j > 0:
snake_case__ :List[str] = self.blend_h(row[j - 1] ,UpperCamelCase ,UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase ,dim=3 ) )
snake_case__ :Tuple = torch.cat(UpperCamelCase ,dim=2 )
snake_case__ :Dict = DiagonalGaussianDistribution(UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase )
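    # Tile arithmetic used above: the encoder walks the image with stride
    # overlap_size = tile_sample_min_size * (1 - f) in pixel space, while
    # blending and cropping happen in latent space with
    # blend_extent = tile_latent_min_size * f and
    # row_limit = tile_latent_min_size - blend_extent, where f is
    # tile_overlap_factor.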
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case__ :Tuple = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
snake_case__ :Any = int(self.tile_sample_min_size * self.tile_overlap_factor )
snake_case__ :List[str] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
snake_case__ :List[Any] = []
for i in range(0 ,z.shape[2] ,UpperCamelCase ):
snake_case__ :Tuple = []
for j in range(0 ,z.shape[3] ,UpperCamelCase ):
snake_case__ :Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
snake_case__ :Union[str, Any] = self.post_quant_conv(UpperCamelCase )
snake_case__ :Tuple = self.decoder(UpperCamelCase )
row.append(UpperCamelCase )
rows.append(UpperCamelCase )
snake_case__ :Optional[int] = []
for i, row in enumerate(UpperCamelCase ):
snake_case__ :Optional[int] = []
for j, tile in enumerate(UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
snake_case__ :List[str] = self.blend_v(rows[i - 1][j] ,UpperCamelCase ,UpperCamelCase )
if j > 0:
snake_case__ :str = self.blend_h(row[j - 1] ,UpperCamelCase ,UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(UpperCamelCase ,dim=3 ) )
snake_case__ :Union[str, Any] = torch.cat(UpperCamelCase ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = False ,UpperCamelCase = True ,UpperCamelCase = None ,) -> Union[DecoderOutput, torch.FloatTensor]:
snake_case__ :Any = sample
snake_case__ :Optional[Any] = self.encode(UpperCamelCase ).latent_dist
if sample_posterior:
snake_case__ :Dict = posterior.sample(generator=UpperCamelCase )
else:
snake_case__ :int = posterior.mode()
snake_case__ :Optional[int] = self.decode(UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase )
| 57
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|