| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (81–54k chars) | int64 (0–721) | string (91–41.9k chars) | int64 (0–699) | int64 (0–1) |
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
    """\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
    - name: "Dataset Card for X"  # First-level markdown heading
      allow_empty: false
      allow_empty_text: true
      subsections:
          - name: "Table of Contents"
            allow_empty: false
            allow_empty_text: false
            subsections: null
          - name: "Dataset Description"
            allow_empty: false
            allow_empty_text: false
            subsections:
                - name: "Dataset Summary"
                  allow_empty: false
                  allow_empty_text: false
                  subsections: null
                - name: "Supported Tasks and Leaderboards"
                  allow_empty: true
                  allow_empty_text: true
                  subsections: null
                - name: Languages
                  allow_empty: false
                  allow_empty_text: true
                  subsections: null
"""
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)


@pytest.mark.parametrize(
    "readme_md, expected_dict",
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ],
)
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ],
)
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error",
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)


@pytest.mark.parametrize(
    "readme_md,",
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ],
)
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
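# Usage sketch (illustrative, not part of the original test suite): the same
# ReadMe API can also be driven directly; validate() raises a ValueError that
# lists every structural issue it finds.
if __name__ == "__main__":
    demo_readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    demo_readme.validate()  # passes silently for a well-formed card
    assert demo_readme.to_dict()["name"] == "root"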
---
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
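# Illustrative invocations (the command ships as a deprecated transformers-cli
# subcommand; the testing_file path below is a placeholder):
#
#     $ transformers-cli add-new-model
#     $ transformers-cli add-new-model --testing --testing_file ./test_config.json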
---
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    # T5-style note encoder as used by diffusers' spectrogram-diffusion pipeline.
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask (additive mask expected by the T5 blocks)
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
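# Smoke-test sketch (hyperparameters below are arbitrary illustrations, not
# values from any shipped checkpoint); relies on diffusers' ConfigMixin and
# ModelMixin machinery to accept the config kwargs.
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=16, vocab_size=128, d_model=32, dropout_rate=0.1, num_layers=2,
        num_heads=2, d_kv=16, d_ff=64, feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 128, (1, 16))
    mask = torch.ones(1, 16, dtype=torch.long)
    encodings, out_mask = encoder(encoder_input_tokens=tokens, encoder_inputs_mask=mask)
    print(encodings.shape)  # torch.Size([1, 16, 32])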
---
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
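# Usage sketch: the helpers above implement the standard single/pair sequence
# layout <s> A </s> (+ B </s>). Running this downloads the pretrained tokenizer
# files, hence the guard.
if __name__ == "__main__":
    from transformers import HerbertTokenizerFast as _HerbertTokenizerFast

    tokenizer = _HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
    ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
    print(ids)  # [cls_id, 5, 6, sep_id, 7, 8, sep_id]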
---
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    # GPT-2 text decoder with a learned prefix, as used by UniDiffuser-style pipelines.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
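# Hedged shape check for the prefix round-trip (tiny, arbitrary config):
# encode_prefix projects CLIP-sized features down, decode_prefix lifts them
# back to the GPT-2 embedding width before they are prepended to the text.
if __name__ == "__main__":
    decoder = UniDiffuserTextDecoder(
        prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=16,
        vocab_size=100, n_positions=64, n_embd=32, n_layer=2, n_head=2,
    )
    prefix = torch.randn(1, 4, 32)
    hidden = decoder.encode_prefix(prefix)      # (1, 4, 16)
    print(decoder.decode_prefix(hidden).shape)  # torch.Size([1, 4, 32])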
---
def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime (requires p >= 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
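    # Added sanity checks: the first few Mersenne-prime exponents are well
    # known, and p = 11 is the classic composite case (2**11 - 1 = 2047 = 23 * 89).
    assert all(lucas_lehmer_test(p) for p in (3, 5, 7, 13, 17, 19, 31))
    assert not lucas_lehmer_test(11)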
---
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only "
                "if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
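# Wiring sketch (hedged): how transformers-cli dispatches this subcommand.
# register_subcommand installs the parser, and the func default builds the
# command object. Note that running this really downloads model + tokenizer.
if __name__ == "__main__":
    parser = ArgumentParser("transformers-cli")
    DownloadCommand.register_subcommand(parser.add_subparsers())
    args = parser.parse_args(["download", "bert-base-uncased"])
    args.func(args).run()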
---
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
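    # Worked example (the classic health/fever HMM; numbers are illustrative):
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    initial = {"Healthy": 0.6, "Fever": 0.4}
    transition = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emission = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, initial, transition, emission))
    # -> ['Healthy', 'Healthy', 'Fever']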
---
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
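# Usage sketch: audiofolder ships as a packaged datasets module, so a local
# directory tree of audio files (the path below is hypothetical) loads with:
#
#     from datasets import load_dataset
#     ds = load_dataset("audiofolder", data_dir="./my_audio_corpus")
#     print(ds["train"].features)  # {'audio': Audio(...), 'label': ClassLabel(...)}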
---
def capitalize_variants(txt: str) -> list:
    """
    Return all variants of txt with exactly one alphabetic character upper-cased.
    (Descriptive name chosen here; the original function name was not preserved.)
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
---
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
__UpperCAmelCase : List[str] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
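# Illustrative command line (the script name and all paths are placeholders):
#
#     python convert_token_dropping_bert_tf2_to_pytorch.py \
#         --tf_checkpoint_path ./tf_ckpt/model.ckpt-100 \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./converted_model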
---
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # (the original `elif a % 15 == 0` branch was unreachable: any multiple
        # of 15 is already a multiple of 3 and caught above)
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
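    # Quick check against the classic small case: multiples of 3 or 5 below 10
    # are 3, 5, 6 and 9, whose sum is 23.
    assert solution(10) == 23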
---
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
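# Quick illustration of create_vocab_dict on a tiny fairseq-style dict file
# ("word count" per line); ids for real words start after the 4 specials:
#
#     "hello 42\nworld 7\n"  ->  {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3,
#                                 "hello": 4, "world": 5}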
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak a fairseq wav2vec2 + speech-translation checkpoint into
    the transformers SpeechEncoderDecoderModel design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0_2_2_4, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 709
|
import os
import sys
import unittest
__UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__UpperCAmelCase : Tuple = os.path.join(git_repo_path, "src", "diffusers")
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Tuple = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCamelCase ,"torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
snake_case__ :Tuple = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
snake_case__ :str = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCamelCase ,"torch_and_transformers_and_onnx" )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :int = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" ,UpperCamelCase )
self.assertIn("torch_and_transformers" ,UpperCamelCase )
self.assertIn("flax_and_transformers" ,UpperCamelCase )
self.assertIn("torch_and_transformers_and_onnx" ,UpperCamelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" ,objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" ,objects["flax"] )
self.assertIn("StableDiffusionPipeline" ,objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" ,objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" ,objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" ,objects["torch_and_transformers_and_onnx"] )
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = create_dummy_object("CONSTANT" ,"'torch'" )
self.assertEqual(UpperCamelCase ,"\nCONSTANT = None\n" )
snake_case__ :Optional[Any] = create_dummy_object("function" ,"'torch'" )
self.assertEqual(
UpperCamelCase ,"\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
snake_case__ :str = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
snake_case__ :List[str] = create_dummy_object("FakeClass" ,"'torch'" )
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Tuple = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
snake_case__ :int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] ,UpperCamelCase )
| 57
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( _A ):
_A = (DDPMScheduler,)
def lowerCAmelCase_ ( self ,**UpperCamelCase ) -> List[Any]:
snake_case__ :Any = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCamelCase )
return config
def lowerCAmelCase_ ( self ) -> Dict:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase ,beta_end=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
self.check_over_configs(thresholding=UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase ,prediction_type=UpperCamelCase ,sample_max_value=UpperCamelCase ,)
def lowerCAmelCase_ ( self ) -> Optional[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[str]:
snake_case__ :Optional[int] = self.scheduler_classes[0]
snake_case__ :Tuple = self.get_scheduler_config()
snake_case__ :Union[str, Any] = scheduler_class(**UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
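# Background for the expected values above (a sketch, not part of the test): for
# variance_type="fixed_small", DDPM uses the posterior variance
#   var(t) = beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t), clamped to >= 1e-20.
# With the linear schedule from 1e-4 to 0.02 over 1000 steps, alphabar_998 is ~0, so
# var(999) ~ beta_999 = 0.02, while at t = 0 the posterior variance collapses to ~0.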
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :str = self.scheduler_classes[0]
snake_case__ :Optional[Any] = self.get_scheduler_config()
snake_case__ :str = scheduler_class(**UpperCamelCase )
snake_case__ :Any = len(UpperCamelCase )
snake_case__ :Union[str, Any] = self.dummy_model()
snake_case__ :Optional[Any] = self.dummy_sample_deter
snake_case__ :int = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase ) ):
# 1. predict noise residual
snake_case__ :Optional[int] = model(UpperCamelCase ,UpperCamelCase )
# 2. predict previous mean of sample x_t-1
snake_case__ :Union[str, Any] = scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,generator=UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case__ :Union[str, Any] = pred_prev_sample
snake_case__ :int = torch.sum(torch.abs(UpperCamelCase ) )
snake_case__ :List[Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[Any] = self.scheduler_classes[0]
snake_case__ :Tuple = self.get_scheduler_config(prediction_type="v_prediction" )
snake_case__ :int = scheduler_class(**UpperCamelCase )
snake_case__ :List[Any] = len(UpperCamelCase )
snake_case__ :int = self.dummy_model()
snake_case__ :Any = self.dummy_sample_deter
snake_case__ :List[str] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase ) ):
# 1. predict noise residual
snake_case__ :Optional[Any] = model(UpperCamelCase ,UpperCamelCase )
# 2. predict previous mean of sample x_t-1
snake_case__ :str = scheduler.step(UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,generator=UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
snake_case__ :List[Any] = pred_prev_sample
snake_case__ :str = torch.sum(torch.abs(UpperCamelCase ) )
snake_case__ :int = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :str = self.scheduler_classes[0]
snake_case__ :Dict = self.get_scheduler_config()
snake_case__ :Any = scheduler_class(**UpperCamelCase )
snake_case__ :Tuple = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
snake_case__ :List[Any] = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase ):
if i == len(UpperCamelCase ) - 1:
snake_case__ :Any = -1
else:
snake_case__ :Optional[Any] = timesteps[i + 1]
snake_case__ :str = scheduler.previous_timestep(UpperCamelCase )
snake_case__ :Any = prev_t.item()
self.assertEqual(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ :Dict = self.scheduler_classes[0]
snake_case__ :List[str] = self.get_scheduler_config()
snake_case__ :int = scheduler_class(**UpperCamelCase )
snake_case__ :Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCamelCase ,msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Optional[Any] = self.scheduler_classes[0]
snake_case__ :str = self.get_scheduler_config()
snake_case__ :List[Any] = scheduler_class(**UpperCamelCase )
snake_case__ :Tuple = [100, 87, 50, 1, 0]
snake_case__ :Optional[Any] = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase ,msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase ,timesteps=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :Union[str, Any] = self.scheduler_classes[0]
snake_case__ :Any = self.get_scheduler_config()
snake_case__ :List[str] = scheduler_class(**UpperCamelCase )
snake_case__ :List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase ,msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" ,):
scheduler.set_timesteps(timesteps=UpperCamelCase )
| 710
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__UpperCAmelCase : str = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class _snake_case ( _A ):
_A = 'facebook/nllb-200-distilled-600M'
_A = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
_A = 'translator'
_A = AutoTokenizer
_A = AutoModelForSeqaSeqLM
_A = LANGUAGE_CODES
_A = ['text', 'text', 'text']
_A = ['text']
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Optional[int]:
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
snake_case__ :Optional[Any] = self.lang_to_code[src_lang]
snake_case__ :Union[str, Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase ,return_tensors="pt" ,src_lang=UpperCamelCase ,tgt_lang=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Tuple:
return self.model.generate(**UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> List[Any]:
return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=UpperCamelCase )
| 711
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Tuple = mock.Mock()
snake_case__ :List[str] = 500
snake_case__ :Any = {}
snake_case__ :Union[str, Any] = HTTPError
snake_case__ :Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ :Any = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Dict:
# A mock response for an HTTP head request to emulate server down
snake_case__ :Union[str, Any] = mock.Mock()
snake_case__ :int = 500
snake_case__ :Any = {}
snake_case__ :Dict = HTTPError
snake_case__ :List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ :Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=UpperCamelCase ) as mock_head:
snake_case__ :Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase_ ( self ) -> int:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ :Union[str, Any] = tempfile.mktemp()
with open(UpperCamelCase ,"wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" ,UpperCamelCase )
snake_case__ :Tuple = AlbertTokenizer.from_pretrained(UpperCamelCase )
finally:
os.remove(UpperCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" ,"wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" ,UpperCamelCase )
snake_case__ :Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024; tiny GPT-2 has a vocab size of 1000.
self.assertEqual(tokenizer.vocab_size ,1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case__ :Union[str, Any] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
_A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def lowerCAmelCase_ ( cls ) -> Optional[int]:
snake_case__ :List[str] = TOKEN
HfFolder.save_token(UpperCamelCase )
@classmethod
def lowerCAmelCase_ ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token ,repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase_ ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[str] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :str = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("test-tokenizer" ,use_auth_token=self._token )
snake_case__ :Dict = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase ,repo_id="test-tokenizer" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :List[str] = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def lowerCAmelCase_ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :List[Any] = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Any = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" ,use_auth_token=self._token )
snake_case__ :Any = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase ,repo_id="valid_org/test-tokenizer-org" ,push_to_hub=UpperCamelCase ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase_ ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :str = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Optional[int] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :Union[str, Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ :int = os.path.join(UpperCamelCase ,"vocab.txt" )
with open(UpperCamelCase ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ :Tuple = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
snake_case__ :List[Any] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer" ,use_auth_token=self._token )
snake_case__ :List[Any] = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer' ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizerFast" )
snake_case__ :List[str] = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer' ,use_fast=UpperCamelCase ,trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,"CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
self.assertEqual(trie.data ,{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) ,["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :Optional[Any] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) ,["A", "BC"] )
self.assertEqual(trie.split("BCA" ) ,["BC", "A"] )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case__ :Any = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) ,["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case__ :str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) ,["AB", "C"] )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
snake_case__ :Dict = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) ,["ABC", "D"] )
def lowerCAmelCase_ ( self ) -> int:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ :Optional[int] = Trie()
snake_case__ :Union[str, Any] = trie.cut_text("ABC" ,[0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase ,["AB", "C"] )
| 57
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__UpperCAmelCase : Optional[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowercase_ ( __snake_case : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case__ :Optional[int] = {}
with open(__snake_case , "r" ) as file:
for line_number, line in enumerate(__snake_case ):
snake_case__ :Any = line.strip()
if line:
snake_case__ :Optional[int] = line.split()
snake_case__ :int = line_number
snake_case__ :Tuple = words[0]
snake_case__ :str = value
return result
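# Illustration with an assumed input file (not from the script): one label per line,
# e.g. a file containing "down\nup\n" yields {0: "down", 1: "up"}; the result is later
# installed as the sequence-classification config's id2label mapping.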
def lowercase_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] , __snake_case : List[Any] ) -> int:
'''simple docstring'''
for attribute in key.split("." ):
snake_case__ :Union[str, Any] = getattr(__snake_case , __snake_case )
snake_case__ :int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
snake_case__ :Union[str, Any] = PARAM_MAPPING[full_name.split("." )[-1]]
snake_case__ :List[str] = "param"
if weight_type is not None and weight_type != "param":
snake_case__ :Union[str, Any] = getattr(__snake_case , __snake_case ).shape
elif weight_type is not None and weight_type == "param":
snake_case__ :Optional[Any] = hf_pointer
for attribute in hf_param_name.split("." ):
snake_case__ :Any = getattr(__snake_case , __snake_case )
snake_case__ :Tuple = shape_pointer.shape
# let's reduce dimension
snake_case__ :Tuple = value[0]
else:
snake_case__ :Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case__ :Tuple = value
elif weight_type == "weight_g":
snake_case__ :Any = value
elif weight_type == "weight_v":
snake_case__ :Tuple = value
elif weight_type == "bias":
snake_case__ :Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
snake_case__ :Dict = getattr(__snake_case , __snake_case )
snake_case__ :Tuple = value
else:
snake_case__ :List[str] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Optional[int] ) -> str:
'''simple docstring'''
snake_case__ :str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
snake_case__ :Optional[Any] = PARAM_MAPPING[full_name.split("." )[-1]]
snake_case__ :Union[str, Any] = "param"
if weight_type is not None and weight_type != "param":
snake_case__ :List[str] = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
snake_case__ :Optional[int] = ".".join([key, hf_param_name] )
else:
snake_case__ :List[str] = key
snake_case__ :Tuple = value if "lm_head" in full_key else value[0]
__UpperCAmelCase : List[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowercase_ ( __snake_case : str , __snake_case : str , __snake_case : Optional[Any]=None , __snake_case : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :Tuple = False
for key, mapped_key in MAPPING.items():
snake_case__ :Optional[int] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
snake_case__ :Optional[int] = True
if "*" in mapped_key:
snake_case__ :Dict = name.split(__snake_case )[0].split("." )[-2]
snake_case__ :List[str] = mapped_key.replace("*" , __snake_case )
if "weight_g" in name:
snake_case__ :List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ :Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ :Optional[int] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ :Optional[int] = "weight"
else:
snake_case__ :int = None
if hf_dict is not None:
rename_dict(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
else:
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
return is_used
def lowercase_ ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Any ) -> int:
'''simple docstring'''
snake_case__ :Any = []
snake_case__ :Optional[Any] = fairseq_model.state_dict()
snake_case__ :List[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ :Any = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , )
snake_case__ :int = True
else:
snake_case__ :str = load_wavaveca_layer(__snake_case , __snake_case , __snake_case )
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def lowercase_ ( __snake_case : int , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Dict ) -> str:
'''simple docstring'''
snake_case__ :Optional[Any] = full_name.split("conv_layers." )[-1]
snake_case__ :Union[str, Any] = name.split("." )
snake_case__ :List[str] = int(items[0] )
snake_case__ :Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case__ :Optional[Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case__ :Dict = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case__ :Tuple = value
logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case__ :Optional[int] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def lowercase_ ( __snake_case : Dict , __snake_case : int , __snake_case : str=None , __snake_case : List[Any]=None , __snake_case : Dict=True , __snake_case : Tuple=False ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
snake_case__ :List[str] = WavaVecaConfig.from_pretrained(__snake_case )
else:
snake_case__ :int = WavaVecaConfig()
if is_seq_class:
snake_case__ :str = read_txt_into_dict(__snake_case )
snake_case__ :Optional[Any] = idalabel
snake_case__ :Optional[int] = WavaVecaForSequenceClassification(__snake_case )
snake_case__ :List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
feature_extractor.save_pretrained(__snake_case )
elif is_finetuned:
if dict_path:
snake_case__ :Tuple = Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case__ :str = target_dict.pad_index
snake_case__ :Tuple = target_dict.bos_index
snake_case__ :Union[str, Any] = target_dict.eos_index
snake_case__ :Union[str, Any] = len(target_dict.symbols )
snake_case__ :List[str] = os.path.join(__snake_case , "vocab.json" )
if not os.path.isdir(__snake_case ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
snake_case__ :Any = target_dict.indices
# fairseq has the <pad> and <s> switched
snake_case__ :Optional[Any] = 0
snake_case__ :str = 1
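# i.e. after the swap the vocab maps "<pad>" -> 0 and "<s>" -> 1, so the CTC blank
# token (<pad>) sits at id 0 as Wav2Vec2CTCTokenizer expects.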
with open(__snake_case , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__snake_case , )
snake_case__ :int = True if config.feat_extract_norm == "layer" else False
snake_case__ :Optional[int] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
snake_case__ :List[Any] = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
snake_case__ :Tuple = WavaVecaForCTC(__snake_case )
else:
snake_case__ :List[str] = WavaVecaForPreTraining(__snake_case )
if is_finetuned or is_seq_class:
snake_case__ :Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
snake_case__ :Optional[Any] = argparse.Namespace(task="audio_pretraining" )
snake_case__ :Optional[Any] = fairseq.tasks.setup_task(__snake_case )
snake_case__ :List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__snake_case )
snake_case__ :Any = model[0].eval()
recursively_load_weights(__snake_case , __snake_case , not is_finetuned )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
__UpperCAmelCase : Optional[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 712
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase : Optional[Any] = 1_6
__UpperCAmelCase : Optional[int] = 3_2
def lowercase_ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Optional[Any]:
'''simple docstring'''
snake_case__ :int = AutoTokenizer.from_pretrained(__snake_case )
snake_case__ :Optional[int] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ :Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ :List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case__ :Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ :Any = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
snake_case__ :Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def lowercase_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : Optional[int] ) -> Tuple:
'''simple docstring'''
model.eval()
snake_case__ :Union[str, Any] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ :List[Any] = model(**__snake_case )
snake_case__ :Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case__ , snake_case__ :Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
snake_case__ :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
snake_case__ :int = metric.compute()
return eval_metric["accuracy"]
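# Worked example of the truncation above (numbers are assumed, not from the script):
# with 100 eval samples split across 8 processes, accelerator.gather() pads the final
# batch with duplicates; if 96 samples were seen before the last step, only the first
# 100 - 96 = 4 gathered predictions/references are kept, so the metric sees each
# sample exactly once.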
def lowercase_ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Any:
'''simple docstring'''
snake_case__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ :Union[str, Any] = config["lr"]
snake_case__ :List[str] = int(config["num_epochs"] )
snake_case__ :Optional[Any] = int(config["seed"] )
snake_case__ :List[Any] = int(config["batch_size"] )
snake_case__ :List[Any] = args.model_name_or_path
set_seed(__snake_case )
snake_case__ , snake_case__ :List[Any] = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ :List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
snake_case__ :int = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ :Tuple = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ :List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ :Any = 1
snake_case__ :List[Any] = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ :Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
snake_case__ :Any = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ :int = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
snake_case__ :Dict = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ :Union[str, Any] = 0
snake_case__ :List[str] = evaluate.load("glue" , "mrpc" )
snake_case__ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
snake_case__ :List[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
snake_case__ :Union[str, Any] = args.resume_from_checkpoint.split("epoch_" )[1]
snake_case__ :Dict = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
snake_case__ :str = int(__snake_case ) + 1
snake_case__ :List[Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
accelerator.print("resumed checkpoint performance:" , __snake_case )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , "r" ) as f:
snake_case__ :Tuple = json.load(__snake_case )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
snake_case__ :Optional[int] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
snake_case__ :str = model(**__snake_case )
snake_case__ :List[str] = outputs.loss
snake_case__ :List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
snake_case__ :int = F'epoch_{epoch}'
snake_case__ :str = os.path.join(args.output_dir , __snake_case )
accelerator.save_state(__snake_case )
snake_case__ :Union[str, Any] = evaluation_loop(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case__ :List[str] = accuracy
snake_case__ :List[str] = lr_scheduler.get_lr()[0]
snake_case__ :List[Any] = optimizer.param_groups[0]["lr"]
snake_case__ :Dict = epoch
snake_case__ :List[Any] = overall_step
accelerator.print(F'epoch {epoch}:' , __snake_case )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def lowercase_ ( ) -> Any:
'''simple docstring'''
snake_case__ :List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__snake_case , default=__snake_case , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=__snake_case , default=__snake_case , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=2 , help="Number of train epochs." , )
snake_case__ :Any = parser.parse_args()
snake_case__ :int = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 57
| 0
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Optional[Any] = order
# a_{0} ... a_{k}
snake_case__ :Any = [1.0] + [0.0] * order
# b_{0} ... b_{k}
snake_case__ :List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
snake_case__ :int = [0.0] * self.order
# y[n-1] ... y[n-k]
snake_case__ :Any = [0.0] * self.order
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> None:
if len(UpperCamelCase ) < self.order:
snake_case__ :Tuple = [1.0, *a_coeffs]
if len(UpperCamelCase ) != self.order + 1:
snake_case__ :Any = (
f'Expected a_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(UpperCamelCase )}'
)
raise ValueError(UpperCamelCase )
if len(UpperCamelCase ) != self.order + 1:
snake_case__ :Union[str, Any] = (
f'Expected b_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(UpperCamelCase )}'
)
raise ValueError(UpperCamelCase )
snake_case__ :List[str] = a_coeffs
snake_case__ :List[str] = b_coeffs
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> float:
snake_case__ :Tuple = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 ,self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
snake_case__ :str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
snake_case__ :str = self.input_history[:-1]
snake_case__ :List[str] = self.output_history[:-1]
snake_case__ :Any = sample
snake_case__ :str = result
return result
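# Usage sketch (illustrative): the obfuscated names above correspond to an IIR filter
# whose original API was IIRFilter(order), set_coefficients(a_coeffs, b_coeffs) and
# process(sample). The coefficients below are placeholders, not a derived design:
#
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -1.1430, 0.4128], [0.0675, 0.1349, 0.0675])
#   out = [filt.process(x) for x in (0.0, 1.0, 1.0, 1.0)]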
| 713
|
from __future__ import annotations
class _snake_case :
def __init__( self ,UpperCamelCase ) -> None:
snake_case__ :Union[str, Any] = data
snake_case__ :Node | None = None
snake_case__ :Node | None = None
def lowercase_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree
'''simple docstring'''
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowercase_ ( __snake_case : Node | None ) -> int:
'''simple docstring'''
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def lowercase_ ( __snake_case : Node ) -> bool:
'''simple docstring'''
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowercase_ ( ) -> None: # Main function for testing.
'''simple docstring'''
snake_case__ :Dict = Node(1 )
snake_case__ :int = Node(2 )
snake_case__ :Optional[Any] = Node(3 )
snake_case__ :Tuple = Node(4 )
snake_case__ :str = Node(5 )
snake_case__ :Optional[Any] = Node(6 )
snake_case__ :List[Any] = Node(7 )
snake_case__ :List[str] = Node(8 )
snake_case__ :Tuple = Node(9 )
print(is_full_binary_tree(__snake_case ) )
print(depth_of_tree(__snake_case ) )
print("Tree is: " )
display(__snake_case )
if __name__ == "__main__":
main()
| 57
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case__ :List[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
snake_case__ :List[Any] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !
snake_case__ :Optional[int] = model(UpperCamelCase )["last_hidden_state"]
snake_case__ :Optional[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,UpperCamelCase )
# compare the actual values for a slice.
snake_case__ :Optional[int] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 714
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCAmelCase : int = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("\n".join(upper_files) + "\n")
__UpperCAmelCase : Any = [file for file in filepaths if " " in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("\n".join(space_files) + "\n")
__UpperCAmelCase : str = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("\n".join(hyphen_files) + "\n")
__UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("\n".join(nodir_files) + "\n")
__UpperCAmelCase : int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : int = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
def lowercase_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
'''simple docstring'''
snake_case__ :Dict = ""
for i in table:
res += inp[i - 1]
return res
def lowercase_ ( __snake_case : List[str] ) -> int:
'''simple docstring'''
return data[1:] + data[0]
def lowercase_ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :Union[str, Any] = ""
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase_ ( __snake_case : Optional[int] , __snake_case : Dict ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ :int = int("0b" + data[0] + data[-1] , 2 )
snake_case__ :Union[str, Any] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowercase_ ( __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] ) -> List[str]:
'''simple docstring'''
snake_case__ :Tuple = message[:4]
snake_case__ :int = message[4:]
snake_case__ :int = apply_table(__snake_case , __snake_case )
snake_case__ :Union[str, Any] = xor(__snake_case , __snake_case )
snake_case__ :Tuple = apply_sbox(__snake_case , temp[:4] ) # noqa: E741
snake_case__ :List[str] = apply_sbox(__snake_case , temp[4:] )
snake_case__ :int = "0" * (2 - len(__snake_case )) + l # noqa: E741
snake_case__ :int = "0" * (2 - len(__snake_case )) + r
snake_case__ :Optional[Any] = apply_table(l + r , __snake_case )
snake_case__ :Tuple = xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
__UpperCAmelCase : Dict = input("Enter 10 bit key: ")
__UpperCAmelCase : Tuple = input("Enter 8 bit message: ")
__UpperCAmelCase : Any = [6, 3, 7, 4, 8, 5, 1_0, 9]
__UpperCAmelCase : List[str] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__UpperCAmelCase : Tuple = [2, 4, 3, 1]
__UpperCAmelCase : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7]
__UpperCAmelCase : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
__UpperCAmelCase : Optional[int] = [4, 1, 2, 3, 2, 3, 4, 1]
__UpperCAmelCase : List[Any] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__UpperCAmelCase : Union[str, Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
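# The eight tables above are the standard S-DES constants, in order: P8, P10,
# P4, the initial permutation IP, its inverse IP_inv, the expansion table EP,
# and the S-boxes S0 and S1 (names inferred; the originals are obfuscated).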
# key generation
__UpperCAmelCase : int = apply_table(key, paa_table)
__UpperCAmelCase : Dict = temp[:5]
__UpperCAmelCase : Optional[int] = temp[5:]
__UpperCAmelCase : Optional[int] = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : int = apply_table(left + right, pa_table)
__UpperCAmelCase : Tuple = left_shift(left)
__UpperCAmelCase : Union[str, Any] = left_shift(right)
__UpperCAmelCase : Dict = left_shift(left)
__UpperCAmelCase : Optional[Any] = left_shift(right)
__UpperCAmelCase : Optional[int] = apply_table(left + right, pa_table)
# encryption
__UpperCAmelCase : Tuple = apply_table(message, IP)
__UpperCAmelCase : Tuple = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : List[Any] = temp[4:] + temp[:4]
__UpperCAmelCase : int = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
__UpperCAmelCase : List[Any] = apply_table(CT, IP)
__UpperCAmelCase : List[Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : int = temp[4:] + temp[:4]
__UpperCAmelCase : Union[str, Any] = function(expansion, sa, sa, keya, temp)
__UpperCAmelCase : Union[str, Any] = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 57
| 0
|
def lowercase_ ( __snake_case : Tuple ) -> Any:
'''simple docstring'''
snake_case__ :Dict = 1
snake_case__ :Any = 2
while i * i <= n:
snake_case__ :List[str] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
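# count_divisors relies on d(n) = prod(e_i + 1) over the prime factorization
# n = prod(p_i ** e_i); solution() then finds the first triangular number
# with more than 500 divisors (Project Euler 12).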
def lowercase_ ( ) -> int:
'''simple docstring'''
snake_case__ :List[str] = 1
snake_case__ :Optional[int] = 1
while True:
i += 1
t_num += i
if count_divisors(__snake_case ) > 5_00:
break
return t_num
if __name__ == "__main__":
print(solution())
| 716
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( _A , _A , _A ):
@register_to_config
def __init__( self ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase = False ,) -> int:
super().__init__()
snake_case__ :Union[str, Any] = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :int = nn.Embedding(UpperCamelCase ,UpperCamelCase )
snake_case__ :Any = False
snake_case__ :List[Any] = nn.Dropout(p=UpperCamelCase )
snake_case__ :Tuple = TaConfig(
vocab_size=UpperCamelCase ,d_model=UpperCamelCase ,num_heads=UpperCamelCase ,d_kv=UpperCamelCase ,d_ff=UpperCamelCase ,dropout_rate=UpperCamelCase ,feed_forward_proj=UpperCamelCase ,is_decoder=UpperCamelCase ,is_encoder_decoder=UpperCamelCase ,)
snake_case__ :List[str] = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
snake_case__ :List[Any] = TaBlock(UpperCamelCase )
self.encoders.append(UpperCamelCase )
snake_case__ :Optional[Any] = TaLayerNorm(UpperCamelCase )
snake_case__ :Any = nn.Dropout(p=UpperCamelCase )
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase ) -> int:
snake_case__ :str = self.token_embedder(UpperCamelCase )
snake_case__ :int = encoder_input_tokens.shape[1]
snake_case__ :List[Any] = torch.arange(UpperCamelCase ,device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase )
snake_case__ :Optional[int] = self.dropout_pre(UpperCamelCase )
# invert the attention mask
snake_case__ :Optional[Any] = encoder_input_tokens.size()
snake_case__ :Dict = self.get_extended_attention_mask(UpperCamelCase ,UpperCamelCase )
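# get_extended_attention_mask (from ModuleUtilsMixin) broadcasts the
# (batch, seq_len) mask to (batch, 1, 1, seq_len) and turns masked positions
# into large negative additive values for the attention softmax.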
for lyr in self.encoders:
snake_case__ :str = lyr(UpperCamelCase ,UpperCamelCase )[0]
snake_case__ :List[Any] = self.layer_norm(UpperCamelCase )
return self.dropout_post(UpperCamelCase ), encoder_inputs_mask
| 57
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case ( _A ):
@staticmethod
@abstractmethod
def lowerCAmelCase_ ( UpperCamelCase ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
raise NotImplementedError()
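# A minimal concrete subclass would look roughly like this (illustrative
# sketch only; method names assume transformers' BaseTransformersCLICommand):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           parser.add_parser("hello")
#
#       def run(self):
#           print("hello")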
| 717
|
__UpperCAmelCase : int = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
__UpperCAmelCase : List[str] = ["a", "b", "c", "d", "e"]
def lowercase_ ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case__ :List[Any] = start
# add current to visited
visited.append(__snake_case )
snake_case__ :List[str] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# if all neighbors visited add current to sort
sort.append(__snake_case )
# if not all vertices have been visited, select an unvisited one and continue
if len(__snake_case ) != len(__snake_case ):
for vertice in vertices:
if vertice not in visited:
snake_case__ :Any = topological_sort(__snake_case , __snake_case , __snake_case )
# return sort
return sort
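# Note: each vertex is appended only after everything reachable from it, so
# `sort` lists a vertex after its outgoing neighbors; reverse it to get the
# conventional topological order.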
if __name__ == "__main__":
__UpperCAmelCase : Tuple = topological_sort("a", [], [])
print(sort)
| 57
| 0
|
from __future__ import annotations
from math import pi
def lowercase_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase_ ( self ) -> str:
snake_case__ , snake_case__ :Tuple = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :List[str] = controlnet_params
snake_case__ :Union[str, Any] = "bird"
snake_case__ :Optional[int] = jax.device_count()
snake_case__ :Tuple = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
snake_case__ :str = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :str = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :int = replicate(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :Any = shard(UpperCamelCase )
snake_case__ :str = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :Any = images[0, 253:256, 253:256, -1]
snake_case__ :Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[Any] = jnp.array(
[0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case__ , snake_case__ :List[str] = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ , snake_case__ :Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" ,controlnet=UpperCamelCase ,from_pt=UpperCamelCase ,dtype=jnp.bfloataa )
snake_case__ :str = controlnet_params
snake_case__ :int = "Chef in the kitchen"
snake_case__ :List[Any] = jax.device_count()
snake_case__ :Dict = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ :Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
snake_case__ :Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ :List[str] = jax.random.PRNGKey(0 )
snake_case__ :Any = jax.random.split(UpperCamelCase ,jax.device_count() )
snake_case__ :Dict = replicate(UpperCamelCase )
snake_case__ :Tuple = shard(UpperCamelCase )
snake_case__ :Optional[int] = shard(UpperCamelCase )
snake_case__ :Optional[Any] = pipe(
prompt_ids=UpperCamelCase ,image=UpperCamelCase ,params=UpperCamelCase ,prng_seed=UpperCamelCase ,num_inference_steps=50 ,jit=UpperCamelCase ,).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ :int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ :List[str] = images[0, 253:256, 253:256, -1]
snake_case__ :Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ :List[str] = jnp.array(
[[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 57
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {}
class _snake_case ( _A ):
_A = 'llama'
_A = ['past_key_values']
def __init__( self ,UpperCamelCase=32_000 ,UpperCamelCase=4_096 ,UpperCamelCase=11_008 ,UpperCamelCase=32 ,UpperCamelCase=32 ,UpperCamelCase=None ,UpperCamelCase="silu" ,UpperCamelCase=2_048 ,UpperCamelCase=0.02 ,UpperCamelCase=1E-6 ,UpperCamelCase=True ,UpperCamelCase=0 ,UpperCamelCase=1 ,UpperCamelCase=2 ,UpperCamelCase=1 ,UpperCamelCase=False ,UpperCamelCase=None ,**UpperCamelCase ,) -> Tuple:
snake_case__ :int = vocab_size
snake_case__ :Any = max_position_embeddings
snake_case__ :int = hidden_size
snake_case__ :List[Any] = intermediate_size
snake_case__ :int = num_hidden_layers
snake_case__ :Union[str, Any] = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
snake_case__ :Dict = num_attention_heads
snake_case__ :List[str] = num_key_value_heads
snake_case__ :Optional[int] = hidden_act
snake_case__ :Any = initializer_range
snake_case__ :Dict = rms_norm_eps
snake_case__ :List[str] = pretraining_tp
snake_case__ :Any = use_cache
snake_case__ :Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=UpperCamelCase ,bos_token_id=UpperCamelCase ,eos_token_id=UpperCamelCase ,tie_word_embeddings=UpperCamelCase ,**UpperCamelCase ,)
def lowerCAmelCase_ ( self ) -> Optional[int]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,UpperCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'got {self.rope_scaling}' )
snake_case__ :Optional[int] = self.rope_scaling.get("type" ,UpperCamelCase )
snake_case__ :Optional[Any] = self.rope_scaling.get("factor" ,UpperCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(UpperCamelCase ,UpperCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
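# A valid value would be e.g. rope_scaling={"type": "linear", "factor": 2.0}.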
| 719
|
def lowercase_ ( __snake_case : list ) -> list:
'''simple docstring'''
if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(__snake_case ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
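# Bead sort ("gravity sort"): each pass lets the excess of a taller rod fall
# onto the shorter rod below, so after len(sequence) passes the list is in
# non-decreasing order. It only works for non-negative integers.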
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 57
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__UpperCAmelCase : int = ""
__UpperCAmelCase : Optional[int] = ""
__UpperCAmelCase : str = ""
__UpperCAmelCase : Optional[int] = 1 # (0 is vertical, 1 is horizontal)
def lowercase_ ( ) -> None:
'''simple docstring'''
snake_case__ :Dict = get_dataset(__snake_case , __snake_case )
print("Processing..." )
snake_case__ :Tuple = update_image_and_anno(__snake_case , __snake_case , __snake_case )
for index, image in enumerate(__snake_case ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case__ :str = random_chars(32 )
snake_case__ :Tuple = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
snake_case__ :List[str] = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(F'/{file_root}.jpg' , __snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(__snake_case )} with {file_name}' )
snake_case__ :Optional[int] = []
for anno in new_annos[index]:
snake_case__ :Dict = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(__snake_case )
with open(F'/{file_root}.txt' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def lowercase_ ( __snake_case : str , __snake_case : str ) -> tuple[list, list]:
'''simple docstring'''
snake_case__ :Optional[int] = []
snake_case__ :Dict = []
for label_file in glob.glob(os.path.join(__snake_case , "*.txt" ) ):
snake_case__ :List[str] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(__snake_case ) as in_file:
snake_case__ :str = in_file.readlines()
snake_case__ :Dict = os.path.join(__snake_case , F'{label_name}.jpg' )
snake_case__ :Optional[Any] = []
for obj_list in obj_lists:
snake_case__ :Union[str, Any] = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__snake_case )
labels.append(__snake_case )
return img_paths, labels
def lowercase_ ( __snake_case : list , __snake_case : list , __snake_case : int = 1 ) -> tuple[list, list, list]:
'''simple docstring'''
snake_case__ :int = []
snake_case__ :List[Any] = []
snake_case__ :Optional[int] = []
for idx in range(len(__snake_case ) ):
snake_case__ :Any = []
snake_case__ :Any = img_list[idx]
path_list.append(__snake_case )
snake_case__ :Dict = anno_list[idx]
snake_case__ :Tuple = cva.imread(__snake_case )
if flip_type == 1:
snake_case__ :Dict = cva.flip(__snake_case , __snake_case )
for bbox in img_annos:
snake_case__ :int = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
snake_case__ :Union[str, Any] = cva.flip(__snake_case , __snake_case )
for bbox in img_annos:
snake_case__ :str = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__snake_case )
new_imgs_list.append(__snake_case )
return new_imgs_list, new_annos_lists, path_list
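# For YOLO-style annotations (class, x_center, y_center, width, height, all
# normalized to [0, 1]), a horizontal flip maps x_center to 1 - x_center and
# a vertical flip maps y_center to 1 - y_center, as done above.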
def lowercase_ ( __snake_case : int = 32 ) -> str:
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
snake_case__ :Dict = ascii_lowercase + digits
return "".join(random.choice(__snake_case ) for _ in range(__snake_case ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 720
|
from __future__ import annotations
def lowercase_ ( __snake_case : list ) -> float:
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(__snake_case ) / len(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase : List[Any] = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721
|
from __future__ import annotations
import math
def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
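# Classic minimax over an implicit complete binary game tree: levels
# alternate between maximizer and minimizer, and node_index * 2 /
# node_index * 2 + 1 address the two children at the next depth.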
def lowercase_ ( ) -> None:
'''simple docstring'''
snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
snake_case__ :int = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
| 0
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs`; pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
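# Note: brightness() above computes 128 + level + (c - 128), which simplifies
# to c + level; Image.point builds a 256-entry lookup table from this
# function and maps every pixel through it.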
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = 'ClapFeatureExtractor'
lowercase : List[Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , __A , __A ) -> Union[str, Any]:
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> List[Any]:
_lowerCAmelCase =kwargs.pop('sampling_rate' , __A )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if audios is not None:
_lowerCAmelCase =self.feature_extractor(
__A , sampling_rate=__A , return_tensors=__A , **__A )
if text is not None and audios is not None:
_lowerCAmelCase =audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> int:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
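# Typical (assumed) usage of this CLAP processor:
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")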
| 58
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs`; pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 1
|
'''simple docstring'''
import random
def UpperCamelCase__ ( a__ , a__ , a__ = False ):
'''simple docstring'''
_lowerCAmelCase ={i: [] for i in range(a__ )}
# if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(a__ )
# if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j) with i < j, add an edge from i to j
# if the randomly generated number is below the given probability
for i in range(a__ ):
for j in range(i + 1 , a__ ):
if random.random() < probability:
graph[i].append(a__ )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(a__ )
return graph
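# In this Erdős-Rényi style G(n, p) model the expected number of undirected
# edges is p * n * (n - 1) / 2.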
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return {
i: [j for j in range(a__ ) if i != j] for i in range(a__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to the next digit place
placement *= RADIX
return list_of_ints
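# Radix sort runs in O(d * (n + RADIX)) time, where d is the number of digits
# of the largest value; each per-digit pass is stable.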
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 1
|
'''simple docstring'''
import os
import numpy
import onnx
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =a.name
_lowerCAmelCase =b.name
_lowerCAmelCase =''
_lowerCAmelCase =''
_lowerCAmelCase =a == b
_lowerCAmelCase =name_a
_lowerCAmelCase =name_b
return res
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =list(model.graph.initializer )
_lowerCAmelCase =list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_lowerCAmelCase =inits[i].name
_lowerCAmelCase =inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.dirname(a__ )
_lowerCAmelCase =os.path.basename(a__ )
_lowerCAmelCase =onnx.load(os.path.join(a__ , a__ ) )
_lowerCAmelCase =list(model.graph.initializer )
_lowerCAmelCase =set()
_lowerCAmelCase ={}
_lowerCAmelCase =[]
_lowerCAmelCase =0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
_lowerCAmelCase =inits[j].data_type
_lowerCAmelCase =numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('unexpected data type: ' , a__ )
total_reduced_size += mem_size
_lowerCAmelCase =inits[i].name
_lowerCAmelCase =inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
_lowerCAmelCase =[name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
_lowerCAmelCase =sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
_lowerCAmelCase ='optimized_' + model_file_name
_lowerCAmelCase =os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
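# The dtype codes above follow onnx.TensorProto.DataType: 1 = FLOAT and
# 6 = INT32 (4 bytes each), 7 = INT64 and 11 = DOUBLE (8 bytes each).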
| 58
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 1
|
'''simple docstring'''
lowercase_ = {}
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
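# a string is invalid once it contains three consecutive late days or a
# second absence, so it contributes no prize strings (Project Euler 191)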
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
_lowerCAmelCase =(days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
_lowerCAmelCase =_calculate(days - 1 , a__ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
_lowerCAmelCase =_calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
_lowerCAmelCase =_calculate(days - 1 , a__ , 0 )
_lowerCAmelCase =state_late + state_absent + state_ontime
_lowerCAmelCase =prizestrings
return prizestrings
def UpperCamelCase__ ( a__ = 3_0 ):
'''simple docstring'''
return _calculate(a__ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 58
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
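# Divide-and-conquer peak finding: each step follows the rising side, so the
# recursion depth is O(log n) (though the list slices copy, so total work is
# still O(n)).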
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =F'''{sampling_rate}'''
_lowerCAmelCase ='1'
_lowerCAmelCase ='f32le'
_lowerCAmelCase =[
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_lowerCAmelCase =ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_lowerCAmelCase =output_stream[0]
_lowerCAmelCase =np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def UpperCamelCase__ ( a__ , a__ , a__ = "f32le" , ):
'''simple docstring'''
_lowerCAmelCase =F'''{sampling_rate}'''
_lowerCAmelCase ='1'
if format_for_conversion == "s16le":
_lowerCAmelCase =2
elif format_for_conversion == "f32le":
_lowerCAmelCase =4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
_lowerCAmelCase =platform.system()
if system == "Linux":
_lowerCAmelCase ='alsa'
_lowerCAmelCase ='default'
elif system == "Darwin":
_lowerCAmelCase ='avfoundation'
_lowerCAmelCase =':0'
elif system == "Windows":
_lowerCAmelCase ='dshow'
_lowerCAmelCase ='default'
_lowerCAmelCase =[
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_lowerCAmelCase =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
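# chunk_len is measured in bytes: samples per chunk times bytes per sample
# (2 for s16le, 4 for f32le).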
_lowerCAmelCase =_ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCamelCase__ ( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
_lowerCAmelCase =stream_chunk_s
else:
_lowerCAmelCase =chunk_length_s
_lowerCAmelCase =ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
_lowerCAmelCase =np.intaa
_lowerCAmelCase =2
elif format_for_conversion == "f32le":
_lowerCAmelCase =np.floataa
_lowerCAmelCase =4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
_lowerCAmelCase =chunk_length_s / 6
_lowerCAmelCase =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
_lowerCAmelCase =[stride_length_s, stride_length_s]
_lowerCAmelCase =int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_lowerCAmelCase =int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_lowerCAmelCase =datetime.datetime.now()
_lowerCAmelCase =datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
_lowerCAmelCase =np.frombuffer(item['raw'] , dtype=a__ )
_lowerCAmelCase =(
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_lowerCAmelCase =sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
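# Worked byte arithmetic for the chunk sizing above (illustrative values):
# at 16 kHz, a 2.0 s chunk of f32le audio (4 bytes per sample) comes to
# 16_000 * 2.0 * 4 = 128_000 bytes.
assert int(round(16_000 * 2.0)) * 4 == 128_000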
def UpperCamelCase__ ( a__ , a__ , a__ , a__ = False ):
'''simple docstring'''
_lowerCAmelCase =B''
_lowerCAmelCase , _lowerCAmelCase =stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
_lowerCAmelCase =0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
_lowerCAmelCase =(_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
_lowerCAmelCase =(_stride_left, stride_right)
_lowerCAmelCase ={'raw': acc[:chunk_len], 'stride': stride}
if stream:
_lowerCAmelCase =False
yield item
_lowerCAmelCase =stride_left
_lowerCAmelCase =acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
_lowerCAmelCase ={'raw': acc, 'stride': (_stride_left, 0)}
if stream:
_lowerCAmelCase =False
yield item
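# Minimal standalone sketch of the striding chunker above (illustrative
# names, non-streaming case only): consecutive chunks overlap by
# stride_left + stride_right bytes so downstream consumers can discard
# edge effects, and the first chunk carries no left stride.
def chunk_bytes_example(stream, chunk_len, stride_left, stride_right):
    acc, left = b"", 0  # the very first chunk has no left stride
    for raw in stream:
        acc += raw
        while len(acc) >= chunk_len:
            yield {"raw": acc[:chunk_len], "stride": (left, stride_right)}
            left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:  # flush whatever remains as a final chunk
        yield {"raw": acc, "stride": (left, 0)}
chunks = list(chunk_bytes_example([b"abcdefghij"], 4, 1, 1))
assert [c["raw"] for c in chunks] == [b"abcd", b"cdef", b"efgh", b"ghij", b"ij"]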
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
    _lowerCAmelCase =2**2_4 # 16 MB read buffer
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
_lowerCAmelCase =ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
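# Worked illustration (example-only helper) of the segment-id scheme
# implemented above: [CLS] + sentence A + [SEP] map to 0, and sentence B
# plus its trailing [SEP] map to 1, mirroring BERT conventions.
def token_type_ids_example(len_a, len_b=None):
    ids = [0] * (len_a + 2)  # [CLS] + tokens_a + [SEP]
    if len_b is not None:
        ids += [1] * (len_b + 1)  # tokens_b + [SEP]
    return ids
assert token_type_ids_example(3) == [0, 0, 0, 0, 0]
assert token_type_ids_example(3, 2) == [0, 0, 0, 0, 0, 1, 1, 1]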
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
lowercase : Tuple = MODEL_FOR_MASKED_LM_MAPPING
lowercase : Any = TF_MODEL_FOR_MASKED_LM_MAPPING
def UpperCamelCase__ ( self ) -> int:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
_lowerCAmelCase =unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_8015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_5506, 'token_str': ' accuser'},
] , )
_lowerCAmelCase =unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_8015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_5506,
'token_str': ' accuser',
},
] , )
_lowerCAmelCase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
_lowerCAmelCase =unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_5676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS'},
] , )
_lowerCAmelCase =unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS'},
] , )
_lowerCAmelCase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3606, 'token_str': ' Clara'},
] , )
_lowerCAmelCase =unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(__A , decimals=6 ) , [
[
{
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_5676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_6416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
_lowerCAmelCase =pipe('Paris is the [MASK] of France.' )
        # We don't actually care about the result; we just want to make sure
        # it runs, meaning the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(__A , __A )
@slow
@require_torch
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(__A )
@slow
@require_tf
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(__A )
def UpperCamelCase__ ( self , __A ) -> Any:
_lowerCAmelCase =unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__A ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
_lowerCAmelCase =unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__A ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 1_2790,
'token_str': ' Lyon',
},
] , )
_lowerCAmelCase =unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__A ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 1_3606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
_lowerCAmelCase =None
_lowerCAmelCase =None
self.run_pipeline_test(__A , [] )
@require_tf
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
_lowerCAmelCase =None
_lowerCAmelCase =None
self.run_pipeline_test(__A , [] )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token (probably reformer or wav2vec2)' )
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
_lowerCAmelCase =[
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def UpperCamelCase__ ( self , __A , __A ) -> Tuple:
_lowerCAmelCase =fill_masker.tokenizer
_lowerCAmelCase =fill_masker.model
_lowerCAmelCase =fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
_lowerCAmelCase =fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
_lowerCAmelCase =fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
__A , [
[
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
],
[
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
],
] , )
with self.assertRaises(__A ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__A ):
fill_masker('This is' )
self.run_test_top_k(__A , __A )
self.run_test_targets(__A , __A )
self.run_test_top_k_targets(__A , __A )
self.fill_mask_with_duplicate_targets_and_top_k(__A , __A )
self.fill_mask_with_multiple_masks(__A , __A )
def UpperCamelCase__ ( self , __A , __A ) -> str:
_lowerCAmelCase =tokenizer.get_vocab()
_lowerCAmelCase =sorted(vocab.keys() )[:2]
# Pipeline argument
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A , targets=__A )
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
_lowerCAmelCase ={vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , __A )
_lowerCAmelCase =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(__A ) )
# Call argument
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=__A )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
_lowerCAmelCase ={vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , __A )
_lowerCAmelCase =[tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(__A ) )
# Score equivalence
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=__A )
_lowerCAmelCase =[top_mask['token_str'] for top_mask in outputs]
_lowerCAmelCase =[top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__A ) == set(__A ):
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=__A )
_lowerCAmelCase =[top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) )
# Raises with invalid
with self.assertRaises(__A ):
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__A ):
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''] )
with self.assertRaises(__A ):
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='' )
def UpperCamelCase__ ( self , __A , __A ) -> str:
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A , top_k=2 )
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
__A , [
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
] , )
self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) )
def UpperCamelCase__ ( self , __A , __A ) -> Tuple:
_lowerCAmelCase =tokenizer.get_vocab()
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
# top_k=2, ntargets=3
_lowerCAmelCase =sorted(vocab.keys() )[:3]
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=__A )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
_lowerCAmelCase =[el['token_str'] for el in sorted(__A , key=lambda __A : x["score"] , reverse=__A )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__A ).issubset(__A ):
_lowerCAmelCase =fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=__A )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__A ) , nested_simplify(__A ) )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
_lowerCAmelCase =tokenizer.get_vocab()
# String duplicates + id duplicates
_lowerCAmelCase =sorted(vocab.keys() )[:3]
_lowerCAmelCase =[targets[0], targets[1], targets[0], targets[2], targets[1]]
_lowerCAmelCase =fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=__A , top_k=10 )
        # The target list contains duplicates, so the pipeline cannot return
        # more candidates than there are unique targets
self.assertEqual(len(__A ) , 3 )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =FillMaskPipeline(model=__A , tokenizer=__A )
_lowerCAmelCase =fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
__A , [
[
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
],
[
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
],
[
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
{'sequence': ANY(__A ), 'score': ANY(__A ), 'token': ANY(__A ), 'token_str': ANY(__A )},
],
] , )
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
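# Note on the model-input-names property above (the one merging tokenizer
# and image-processor names): dict.fromkeys acts as an order-preserving
# de-duplication of the combined list.
_names_example = ["input_ids", "attention_mask", "pixel_values", "attention_mask"]
assert list(dict.fromkeys(_names_example)) == ["input_ids", "attention_mask", "pixel_values"]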
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : str = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = True , __A = 1 / 255 , __A = None , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
_lowerCAmelCase =size if size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase =get_size_dict(__A )
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A , param_name='crop_size' )
_lowerCAmelCase =do_resize
_lowerCAmelCase =do_rescale
_lowerCAmelCase =do_normalize
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =size
_lowerCAmelCase =resample
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A )
if "shortest_edge" in size:
_lowerCAmelCase =get_resize_output_image_size(__A , size=size['shortest_edge'] , default_to_square=__A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCAmelCase =(size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__A , size=(size['height'], size['width']) , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A ) -> np.ndarray:
return rescale(__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase =crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' , default_to_square=__A )
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__A )
if not is_batched(__A ):
_lowerCAmelCase =[images]
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__A ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
_lowerCAmelCase =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__A , __A ) for image in images]
_lowerCAmelCase ={'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
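# Shape/order sketch of the preprocessing above on a dummy image (the 0.5
# mean/std values are assumptions of this example, not the defaults):
# rescale to [0, 1], normalize, then move channels first.
import numpy as np
_img = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
_scaled = _img.astype(np.float32) * (1 / 255)  # rescale step
_normed = (_scaled - 0.5) / 0.5                # normalize step
_chw = _normed.transpose(2, 0, 1)              # ChannelDimension.FIRST
assert _chw.shape == (3, 224, 224)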
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
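# Sketch of the encoder_decoder_mask construction used above: an outer
# product of the query and key padding masks yields a
# (batch, 1, q_len, k_len) attention mask (toy tensors, illustration only).
import torch
_q_mask = torch.tensor([[1.0, 1.0, 0.0]])  # (batch, q_len)
_k_mask = torch.tensor([[1.0, 0.0]])       # (batch, k_len)
_mask = torch.mul(_q_mask.unsqueeze(-1), _k_mask.unsqueeze(-2)).unsqueeze(-3)
assert _mask.shape == (1, 1, 3, 2) and _mask[0, 0, 0, 1] == 0.0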
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
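# Numerical sketch of the RMS-only normalisation above (eps and the learned
# scale omitted for clarity): divide by the root mean square of the
# features, with no mean subtraction and no bias.
import torch
_x = torch.tensor([[3.0, 4.0]])
_rms = _x.pow(2).mean(-1, keepdim=True).sqrt()  # sqrt((9 + 16) / 2) ~= 3.5355
assert torch.allclose(_x / _rms, torch.tensor([[0.8485, 1.1314]]), atol=1e-3)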
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
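# Quick framework-free sanity check of the tanh GELU approximation above
# (check values are illustrative): GELU(0) is 0 and large positive inputs
# pass through almost unchanged.
import math
def gelu_tanh_example(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
assert gelu_tanh_example(0.0) == 0.0
assert abs(gelu_tanh_example(3.0) - 3.0) < 1e-2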
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
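# Minimal FiLM demonstration matching the layer above: the conditioning
# projection yields (scale, shift) halves along the last axis and modulates
# the features as x * (1 + scale) + shift (toy numbers, illustration only).
import torch
_features = torch.ones(1, 4)
_scale_shift = torch.tensor([[0.5, 0.5, 0.5, 0.5, -1.0, -1.0, -1.0, -1.0]])
_scale, _shift = torch.chunk(_scale_shift, 2, -1)
assert torch.allclose(_features * (1 + _scale) + _shift, torch.full((1, 4), 0.5))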
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowercase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'ernie_m'
lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __A = 25_0002 , __A = 768 , __A = 12 , __A = 12 , __A = 3072 , __A = "gelu" , __A = 0.1 , __A = 0.1 , __A = 514 , __A = 0.02 , __A = 1 , __A = 1E-05 , __A=None , __A=False , __A=0.0 , **__A , ) -> Optional[Any]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =classifier_dropout
_lowerCAmelCase =is_decoder
_lowerCAmelCase =act_dropout
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=__A , default='./' , help='path where the trained model will be saved.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 0
lowercase : bool = False
lowercase : float = 3.0
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=__A ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCamelCase__ ( self ) -> int:
# If no defaults are changed, `to_kwargs` returns an empty dict.
_lowerCAmelCase =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase =Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , __A )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__A , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(100, 200)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ''''''
lowercase_ = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_lowerCAmelCase =k.replace(a__ , a__ )
return k
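# Worked example of the renaming pass above: a few of the (pegasus, hf)
# pairs from PATTERNS, applied in order, rewrite a TF checkpoint key step
# by step into its PyTorch state-dict equivalent.
_key = "encoder/memory_attention/output_proj/kernel"
for _old, _new in [["memory_attention", "encoder_attn"], ["/", "."], ["output_proj", "out_proj"], ["kernel", "weight"]]:
    _key = _key.replace(_old, _new)
assert _key == "encoder.encoder_attn.out_proj.weight"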
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =DEFAULTS.copy()
cfg_kwargs.update(a__ )
_lowerCAmelCase =PegasusConfig(**a__ )
_lowerCAmelCase =PegasusForConditionalGeneration(a__ )
_lowerCAmelCase =torch_model.model.state_dict()
_lowerCAmelCase ={}
for k, v in tf_weights.items():
_lowerCAmelCase =rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
_lowerCAmelCase =v.T
_lowerCAmelCase =torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
_lowerCAmelCase =torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
_lowerCAmelCase =mapping['shared.weight']
_lowerCAmelCase =mapping['shared.weight']
_lowerCAmelCase ={k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**a__ )
_lowerCAmelCase , _lowerCAmelCase =torch_model.model.load_state_dict(a__ , strict=a__ )
_lowerCAmelCase =[
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase__ ( a__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_lowerCAmelCase =tf.train.list_variables(a__ )
_lowerCAmelCase ={}
_lowerCAmelCase =['Adafactor', 'global_step']
for name, shape in tqdm(a__ , desc='converting tf checkpoint to dict' ):
_lowerCAmelCase =any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCAmelCase =tf.train.load_variable(a__ , a__ )
_lowerCAmelCase =array
return tf_weights
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =Path(a__ ).parent.name
_lowerCAmelCase =task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
_lowerCAmelCase =PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
_lowerCAmelCase =get_tf_weights_as_numpy(a__ )
_lowerCAmelCase =task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
_lowerCAmelCase =task_specific_params
_lowerCAmelCase =convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
_lowerCAmelCase =torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(a__ , Path(a__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase_ = parser.parse_args()
if args.save_dir is None:
lowercase_ = Path(args.tf_ckpt_path).parent.name
lowercase_ = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
                _lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # feeds an nn.Sequential with Tanh, so two entries at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
            ) # Mesh-TensorFlow stores all experts in one array, so it is split here
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
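# A minimal NumPy sketch (illustrative shapes only, not the real GPTSAN
# checkpoint) of the fused-QKV split performed above: Mesh-TensorFlow stores a
# single kernel of shape [d_model, 3, n_heads, d_head]; each Q/K/V slice is
# flattened over the head axes and transposed into PyTorch's
# [out_features, in_features] Linear layout.
import numpy as np
d_model, n_heads, d_head = 10, 2, 4
fused = np.random.rand(d_model, 3, n_heads, d_head)
for idx in (0, 1, 2):  # q, k, v
    part = fused[:, idx, :, :]
    weight = part.reshape(d_model, n_heads * d_head).transpose([1, 0]).copy()
    assert weight.shape == (n_heads * d_head, d_model)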
'''simple docstring'''
import base64
def base64_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.b64encode(string.encode('utf-8'))
def base64_decode(encoded: bytes) -> str:
    '''simple docstring'''
    return base64.b64decode(encoded).decode('utf-8')
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
'''simple docstring'''
def solution(power: int = 1000) -> int:
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
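# Worked example for the digit-sum loop above (same arithmetic as
# solution(15)): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert sum(int(digit) for digit in str(2**15)) == 26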
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params , i , prefix ):
    '''simple docstring'''
    return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def UpperCamelCase__ ( a__ , a__ , a__ , a__="attention" ):
'''simple docstring'''
_lowerCAmelCase =_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
_lowerCAmelCase =k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
_lowerCAmelCase =o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
_lowerCAmelCase =q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_lowerCAmelCase =np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
_lowerCAmelCase =v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    '''simple docstring'''
    if split_mlp_wi:
        wi_0 = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
        wi_1 = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
    wo = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params , i , prefix , layer_name ):
    '''simple docstring'''
    return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def convert_tax_to_pytorch( variables , * , num_layers , is_encoder_only , scalable_attention = False ):
    '''simple docstring'''
    old = traverse_util.flatten_dict(variables['target'] )
    old = {'/'.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/encoder/mlp/wi_0/kernel' in old
    print('Split MLP:' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_attention_layer_norm' )
        k, o, q, v = tax_attention_lookup(old , i , 'encoder' , 'attention' )
        new[F'''encoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
        new[F'''encoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
        new[F'''encoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , 'encoder' , 'pre_mlp_layer_norm' )
        wi, wo = tax_mlp_lookup(old , i , 'encoder' , split_mlp_wi )
        new[F'''encoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
        if split_mlp_wi:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'''] = wi[0].T
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'''] = wi[1].T
        else:
            new[F'''encoder.block.{i}.layer.1.DenseReluDense.wi.weight'''] = wi.T
        new[F'''encoder.block.{i}.layer.1.DenseReluDense.wo.weight'''] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'''encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
                old , i , 'encoder' ).T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not scalable_attention:
        new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old , 0 , 'encoder' ).T
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
            old , 0 , 'decoder' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_self_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'self_attention' )
            new[F'''decoder.block.{i}.layer.0.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.0.SelfAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.0.SelfAttention.v.weight'''] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_cross_attention_layer_norm' )
            k, o, q, v = tax_attention_lookup(old , i , 'decoder' , 'encoder_decoder_attention' )
            new[F'''decoder.block.{i}.layer.1.layer_norm.weight'''] = layer_norm
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.k.weight'''] = k.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.o.weight'''] = o.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.q.weight'''] = q.T
            new[F'''decoder.block.{i}.layer.1.EncDecAttention.v.weight'''] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , 'decoder' , 'pre_mlp_layer_norm' )
            wi, wo = tax_mlp_lookup(old , i , 'decoder' , split_mlp_wi )
            new[F'''decoder.block.{i}.layer.2.layer_norm.weight'''] = layer_norm
            if split_mlp_wi:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'''] = wi[0].T
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'''] = wi[1].T
            else:
                new[F'''decoder.block.{i}.layer.2.DenseReluDense.wi.weight'''] = wi.T
            new[F'''decoder.block.{i}.layer.2.DenseReluDense.wo.weight'''] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'''decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(old , i , 'decoder' ).T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict( converted_params , is_encoder_only ):
    '''simple docstring'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.' )
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    '''simple docstring'''
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    '''simple docstring'''
    config = MT5Config.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('Done' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
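# A small NumPy sketch (toy shapes, hypothetical arrays) of the reshapes done
# in the attention lookup above: T5X stores key/query/value kernels per layer
# as [d_model, n_heads, d_head] and the output kernel as
# [n_heads, d_head, d_model]; flattening the head axes yields the 2D matrices
# that are later transposed into PyTorch layout.
import numpy as np
d_model, n_heads, d_head = 5, 2, 3
k_tmp = np.random.rand(d_model, n_heads, d_head)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])  # [d_model, n_heads * d_head]
o_tmp = np.random.rand(n_heads, d_head, d_model)
o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])  # [n_heads * d_head, d_model]
assert k.shape == (5, 6) and o.shape == (6, 5)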
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
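# Usage sketch for the cycle check above (hypothetical adjacency lists; each
# key maps a vertex to its outgoing neighbours):
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False  # DAG: no back edge
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True     # back edge 2 -> 0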
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , ) -> Tuple:
_lowerCAmelCase =size if size is not None else {'shortest_edge': 20}
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 18, 'width': 18}
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =min_resolution
_lowerCAmelCase =max_resolution
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
def UpperCamelCase__ ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
    lowercase : Tuple = MobileNetV2ImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ) -> List[str]:
        _lowerCAmelCase =MobileNetV2ImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , 'do_resize' ) )
self.assertTrue(hasattr(__A , 'size' ) )
self.assertTrue(hasattr(__A , 'do_center_crop' ) )
self.assertTrue(hasattr(__A , 'crop_size' ) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_lowerCAmelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
def UpperCamelCase__ ( self ) -> int:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        _lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        _lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase__ ( self ) -> List[Any]:
# Initialize image_processing
_lowerCAmelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        _lowerCAmelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_lowerCAmelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_lowerCAmelCase =image_processing(__A , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
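# A self-contained sketch (an assumed stand-in, not the real
# `prepare_image_inputs` helper) of the fixtures used above: a batch of random
# RGB PIL images whose sides fall between min_resolution and max_resolution.
import numpy as np
from PIL import Image
def make_random_images(batch_size=7, num_channels=3, lo=30, hi=400):
    images = []
    for _ in range(batch_size):
        height, width = np.random.randint(lo, hi, size=2)
        arr = np.random.randint(0, 256, size=(height, width, num_channels), dtype=np.uint8)
        images.append(Image.fromarray(arr))
    return images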
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
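# Usage sketch for the composite config above, written against the public
# transformers API (`Blip2Config` and friends are assumed to be the
# de-obfuscated names of the classes defined here):
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
config = Blip2Config.from_vision_qformer_text_configs(
    vision_config=Blip2VisionConfig(),
    qformer_config=Blip2QFormerConfig(),
    text_config=OPTConfig(),
)
assert config.num_query_tokens == 32  # the default set in __init__ above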
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =value_function
_lowerCAmelCase =unet
_lowerCAmelCase =scheduler
_lowerCAmelCase =env
_lowerCAmelCase =env.get_dataset()
_lowerCAmelCase ={}
for key in self.data.keys():
try:
_lowerCAmelCase =self.data[key].mean()
except: # noqa: E722
pass
_lowerCAmelCase ={}
for key in self.data.keys():
try:
_lowerCAmelCase =self.data[key].std()
except: # noqa: E722
pass
_lowerCAmelCase =env.observation_space.shape[0]
_lowerCAmelCase =env.action_space.shape[0]
def UpperCamelCase__ ( self , __A , __A ) -> str:
return (x_in - self.means[key]) / self.stds[key]
def UpperCamelCase__ ( self , __A , __A ) -> List[Any]:
return x_in * self.stds[key] + self.means[key]
def UpperCamelCase__ ( self , __A ) -> List[Any]:
if type(__A ) is dict:
return {k: self.to_torch(__A ) for k, v in x_in.items()}
elif torch.is_tensor(__A ):
return x_in.to(self.unet.device )
return torch.tensor(__A , device=self.unet.device )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[int]:
for key, val in cond.items():
_lowerCAmelCase =val.clone()
return x_in
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =x.shape[0]
_lowerCAmelCase =None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_lowerCAmelCase =torch.full((batch_size,) , __A , device=self.unet.device , dtype=torch.long )
for _ in range(__A ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_lowerCAmelCase =self.value_function(x.permute(0 , 2 , 1 ) , __A ).sample
_lowerCAmelCase =torch.autograd.grad([y.sum()] , [x] )[0]
_lowerCAmelCase =self.scheduler._get_variance(__A )
_lowerCAmelCase =torch.exp(0.5 * posterior_variance )
_lowerCAmelCase =model_std * grad
_lowerCAmelCase =0
_lowerCAmelCase =x.detach()
_lowerCAmelCase =x + scale * grad
_lowerCAmelCase =self.reset_xa(__A , __A , self.action_dim )
_lowerCAmelCase =self.unet(x.permute(0 , 2 , 1 ) , __A ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
            _lowerCAmelCase =self.scheduler.step(__A , __A , __A , predict_epsilon=False )['prev_sample']
# apply conditions to the trajectory (set the initial state)
_lowerCAmelCase =self.reset_xa(__A , __A , self.action_dim )
_lowerCAmelCase =self.to_torch(__A )
return x, y
def __call__( self , __A , __A=64 , __A=32 , __A=2 , __A=0.1 ) -> str:
# normalize the observations and create batch dimension
_lowerCAmelCase =self.normalize(__A , 'observations' )
_lowerCAmelCase =obs[None].repeat(__A , axis=0 )
_lowerCAmelCase ={0: self.to_torch(__A )}
_lowerCAmelCase =(batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_lowerCAmelCase =randn_tensor(__A , device=self.unet.device )
_lowerCAmelCase =self.reset_xa(__A , __A , self.action_dim )
_lowerCAmelCase =self.to_torch(__A )
# run the diffusion process
_lowerCAmelCase , _lowerCAmelCase =self.run_diffusion(__A , __A , __A , __A )
# sort output trajectories by value
        _lowerCAmelCase =y.argsort(0 , descending=True ).squeeze()
_lowerCAmelCase =x[sorted_idx]
_lowerCAmelCase =sorted_values[:, :, : self.action_dim]
_lowerCAmelCase =actions.detach().cpu().numpy()
_lowerCAmelCase =self.de_normalize(__A , key='actions' )
# select the action with the highest value
if y is not None:
_lowerCAmelCase =0
else:
# if we didn't run value guiding, select a random action
_lowerCAmelCase =np.random.randint(0 , __A )
_lowerCAmelCase =denorm_actions[selected_index, 0]
return denorm_actions
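# A toy torch sketch of the value-guided update inside run_diffusion above:
# the gradient of a (stand-in) value function nudges the sampled trajectory
# toward higher-value states before each denoising step.
import torch
x = torch.randn(4, 16, requires_grad=True)  # batch of flattened trajectories
y = (-(x ** 2)).sum()                       # stand-in for value_function(...).sample
grad = torch.autograd.grad([y], [x])[0]
x = x.detach() + 0.1 * grad                 # gradient ascent on the value, scale = 0.1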
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    '''simple docstring'''
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
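# Round-trip sketch for the helpers above: Morse encoding is reversible for
# any message whose characters appear in MORSE_CODE_DICT.
assert encrypt('SOS') == '... --- ...'
assert decrypt('... --- ...') == 'SOS'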
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class SCREAMING_SNAKE_CASE ( unittest.TestCase , __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =load_tool('text-classification' )
self.tool.setup()
        _lowerCAmelCase =load_tool('text-classification' , remote=True )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(__A , 'positive' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.remote_tool('That\'s quite cool' , ['positive', 'negative'] )
self.assertEqual(__A , 'positive' )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(__A , 'positive' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'] )
self.assertEqual(__A , 'positive' )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> int:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__A ):
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(__A )
self.assertIsNotNone(__A )
self.assertIsInstance(__A , __A )
@slow
def UpperCamelCase__ ( self ) -> List[str]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxBertModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
@slow
def UpperCamelCase__ ( self ) -> Any:
for model_name in ["roberta-base", "roberta-large"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A )
_lowerCAmelCase =FlaxRobertaModel.from_pretrained(__A )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**__A )
eval(**__A ).block_until_ready()
def UpperCamelCase__ ( self ) -> Tuple:
with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _lowerCAmelCase =FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def UpperCamelCase__ ( self ) -> List[str]:
with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
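# A minimal, self-contained sketch of the `jax.jit` pattern these tests
# exercise: wrap a pure function so repeated calls reuse the compiled version,
# then block on the asynchronous result just as the tests do.
import jax
import jax.numpy as jnp
@jax.jit
def matmul(a, b):
    return jnp.dot(a, b)
out = matmul(jnp.ones((2, 3)), jnp.ones((3, 2)))
out.block_until_ready()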
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
        _lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.float16 )
        _lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
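# A hedged sketch of the peak-memory pattern used throughout these tests:
# reset the CUDA allocator statistics, run the workload, then read the
# high-water mark that the `mem_bytes` assertions check.
import torch
if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.randn(1024, 1024, device='cuda') @ torch.randn(1024, 1024, device='cuda')
    assert torch.cuda.max_memory_allocated() > 0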
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
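# Worked result for the sample input above: cutting the edges (1, 3) and
# (1, 6) leaves components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}, all of even
# size, so the script prints 2 -- the maximum number of removable edges for
# this "Even Tree" instance.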
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """simple docstring"""
    def test_base_case(self) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self) -> None:
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
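# The `knapsack` module under test is not shown here; the following is a
# hypothetical minimal 0/1-knapsack implementation consistent with the calls
# k.knapsack(capacity, weights, values, counter) made above.
def knapsack(capacity, weights, values, counter):
    # No items left or no capacity left: nothing more to gain.
    if counter == 0 or capacity == 0:
        return 0
    # Item doesn't fit: skip it.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )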
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[Any] = VOCAB_FILES_NAMES
lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , __A , __A , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , **__A , ) -> Optional[int]:
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase =json.load(__A )
_lowerCAmelCase ={v: k for k, v in self.encoder.items()}
_lowerCAmelCase =errors # how to handle errors in decoding
_lowerCAmelCase =bytes_to_unicode()
_lowerCAmelCase ={v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding='utf-8' ) as merges_handle:
_lowerCAmelCase =merges_handle.read().split('\n' )[1:-1]
_lowerCAmelCase =[tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase =dict(zip(__A , range(len(__A ) ) ) )
_lowerCAmelCase ={}
_lowerCAmelCase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase =re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def UpperCamelCase__ ( self ) -> str:
return len(self.encoder )
def UpperCamelCase__ ( self ) -> List[str]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
def UpperCamelCase__ ( self , __A ) -> Optional[Any]:
_lowerCAmelCase =[]
for token in re.findall(self.pat , __A ):
_lowerCAmelCase =''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(' ' ) )
return bpe_tokens
def UpperCamelCase__ ( self , __A ) -> Optional[Any]:
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self , __A ) -> str:
return self.decoder.get(__A )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =''.join(__A )
_lowerCAmelCase =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
_lowerCAmelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , __A , __A=False , **__A ) -> str:
_lowerCAmelCase =kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
_lowerCAmelCase =' ' + text
return (text, kwargs)
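# A short demo of the two module-level helpers above: the byte-level map
# covers all 256 byte values, and get_pairs returns the adjacent symbol pairs
# that BPE merges are ranked over.
byte_map = bytes_to_unicode()
assert len(byte_map) == 256
assert get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}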
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = 0.9 , __A = PILImageResampling.BICUBIC , __A = True , __A = None , __A = 1 / 255 , __A = True , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
_lowerCAmelCase =size if size is not None else {'shortest_edge': 224}
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =crop_pct
_lowerCAmelCase =resample
_lowerCAmelCase =do_center_crop
_lowerCAmelCase =crop_size
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase__ ( self , __A , __A , __A = None , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
if crop_pct is not None:
if "shortest_edge" in size:
_lowerCAmelCase =int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_lowerCAmelCase =int(size['height'] / crop_pct )
else:
_lowerCAmelCase =(int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(__A ) )
_lowerCAmelCase =get_resize_output_image_size(__A , size=__A , default_to_square=__A )
else:
if "shortest_edge" in size:
_lowerCAmelCase =get_resize_output_image_size(__A , size=size['shortest_edge'] , default_to_square=__A )
elif "height" in size and "width" in size:
_lowerCAmelCase =(size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(__A ) )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
_lowerCAmelCase =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(F'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__A , size=(size['height'], size['width']) , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> int:
return rescale(__A , scale=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =crop_pct if crop_pct is not None else self.crop_pct
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(__A , default_to_square=__A )
_lowerCAmelCase =crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase =get_size_dict(__A , param_name='crop_size' )
_lowerCAmelCase =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(__A ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images]
if do_center_crop:
_lowerCAmelCase =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(__A , __A ) for image in images]
_lowerCAmelCase ={'pixel_values': images}
return BatchFeature(data=__A , tensor_type=__A )
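# A minimal usage sketch for the processor above (hedged: the enclosing class
# definition is not shown in this excerpt, so ``ImageProcessor`` stands in for
# its name, and ``preprocess`` refers to the final method returning a
# ``BatchFeature``):
#
#     from PIL import Image
#     processor = ImageProcessor(size={'shortest_edge': 256}, crop_size={'height': 224, 'width': 224})
#     batch = processor.preprocess(Image.open('example.jpg'), return_tensors='np')
#     print(batch['pixel_values'].shape)  # e.g. (1, 3, 224, 224)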
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'


class TestMbartCc25Enro(TestCasePlus):
    """simple docstring"""

    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz',
            extract_compressed_file=True,
        )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'''
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        '''.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt['state_dict']
        assert ckpt['state_dict'][expected_key].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    """simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
        )
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        bash_script = bash_script.replace('--fp16 ', ' ')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16', '')
        epochs = 6
        testargs = (
            ['distillation.py']
            + bash_script.split()
            + [
                f'--output_dir={output_dir}',
                '--gpus=1',
                '--learning_rate=1e-3',
                f'--num_train_epochs={epochs}',
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith('.ckpt')][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt['state_dict']
        assert ckpt['state_dict'][expected_key].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act='relu',
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict['output_hidden_states']
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
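# Standalone inference sketch mirroring the integration test above (assumption:
# the first archive entry is the public "microsoft/resnet-50" checkpoint, and
# TF plus vision extras are installed):
#
#     model = TFResNetForImageClassification.from_pretrained('microsoft/resnet-50')
#     processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
#     inputs = processor(images=prepare_img(), return_tensors='tf')
#     logits = model(**inputs).logits
#     print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class index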
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    """simple docstring"""

    mode = 'sequence-classification'

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ['distilbert', 'bart']:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {'loss': loss, 'log': tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ['train', 'dev']:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == 'classification':
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == 'regression':
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ['distilbert', 'bart']:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {'val_loss': tmp_eval_loss.detach().cpu(), 'pred': preds, 'target': out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == 'classification':
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == 'regression':
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {'val_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {'avg_test_loss': logs['val_loss'], 'log': logs, 'progress_bar': logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none'
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
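# Example invocation (a sketch; the script name and data path are placeholders,
# and the full flag set comes from `add_generic_args` and `BaseTransformer` in
# `lightning_base`, which are not shown here):
#
#     python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#         --model_name_or_path bert-base-cased --do_train --do_predict \
#         --max_seq_length 128 --output_dir ./results/mrpc --gpus 1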
if __name__ == "__main__":
main()
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token='<|endoftext|>',
                 bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length:]
        return input_ids
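# A short usage sketch (downloads the tokenizer files from the Hub):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
#     ids = tokenizer('Hello world').input_ids
#     assert tokenizer.decode(ids) == 'Hello world'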
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Boruvka's algorithm for finding a minimum spanning tree."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}')


def test_vector() -> None:
    """
    >>> g = Graph(3)
    >>> g.add_edge(0, 1, 1)
    >>> g.add_edge(1, 2, 2)
    >>> g.boruvka()
    Added edge [0 - 1]
    Added weight: 1
    <BLANKLINE>
    Added edge [1 - 2]
    Added weight: 2
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 3
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: side length of the largest all-ones square in ``mat``."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with a memoization table."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP over the full (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: the next pass must read this row's finished values
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
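    # Quick cross-check sketch: all four implementations should agree on any
    # 0/1 matrix (each returns the side length of the largest all-ones square):
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down_approach(3, 3, mat)
        == largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat)
        == largest_square_area_in_matrix_bottom_up(3, 3, mat)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat)
        == 2
    )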
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
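# Note on the transform: 128 + level + (c - 128) simplifies to c + level, i.e. a
# uniform brightness shift; for 8-bit images, Image.point clamps each mapped
# value to the 0-255 channel range.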
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
'''simple docstring'''
def triangle_number_generator():
    """Generates the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Counts the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
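# Worked check: 28 = 2^2 * 7, so count_divisors gives (2 + 1) * (1 + 1) = 6,
# matching its divisors 1, 2, 4, 7, 14, 28:
#
#     assert count_divisors(28) == 6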
def solution() -> int:
    """Returns the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """simple docstring"""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
            new_config = BertConfig.from_pretrained(f'{USER}/test-config')
            for k, v in config.to_dict().items():
                if k != 'transformers_version':
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != 'transformers_version':
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )
            new_config = BertConfig.from_pretrained('valid_org/test-config-org')
            for k, v in config.to_dict().items():
                if k != 'transformers_version':
                    self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """simple docstring"""

    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}'
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version']
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json'
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
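# A minimal save/load round-trip sketch for configs (the directory name is a
# placeholder; several tests above rely on exactly this mechanism):
#
#     config = BertConfig(hidden_size=32)
#     config.save_pretrained('/tmp/my-config')
#     reloaded = BertConfig.from_pretrained('/tmp/my-config')
#     assert reloaded.hidden_size == 32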
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
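# The dummy classes in this module are import-time placeholders: each mirrors
# the public name of a torch-backed object and raises as soon as it is
# instantiated or one of its constructors is called without torch installed.
# A minimal sketch of the mechanism (a simplified stand-in for the real
# `requires_backends` helper imported above):
#
#     def requires_backends(obj, backends):
#         name = obj.__name__ if hasattr(obj, '__name__') else obj.__class__.__name__
#         raise ImportError(f'{name} requires the {backends} backend(s) to be installed.')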
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Tuple = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
def UpperCamelCase__ ( *a__ , **a__ ):
'''simple docstring'''
requires_backends(a__ , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Tuple = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[int] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Any = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Any = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : int = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['torch']
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[Any] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[Any] = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['torch']
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : str = ['torch']
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Any = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Dict:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Tuple = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : int = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Union[str, Any]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : int = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Any = ['torch']
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[int]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> int:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : int = ['torch']
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : List[str] = ['torch']
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[str]:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Optional[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> str:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Optional[Any] = ['torch']
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> List[Any]:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
class SCREAMING_SNAKE_CASE ( metaclass=__lowercase):
"""simple docstring"""
lowercase : Dict = ['torch']
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Any:
requires_backends(cls , ['torch'] )
@classmethod
def UpperCamelCase__ ( cls , *__A , **__A ) -> Tuple:
requires_backends(cls , ['torch'] )
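# Hedged note (added): the placeholder classes above follow the DummyObject
# pattern from transformers.utils: each class can be imported even when
# PyTorch is absent, but constructing it or calling its classmethods
# (typically `from_config` / `from_pretrained`) routes through
# requires_backends and raises a clear ImportError naming the missing
# backend. A minimal sketch of the metaclass mechanism, under that assumption:
#
#     class DummyObject(type):
#         def __getattr__(cls, key):
#             requires_backends(cls, cls._backends)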
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints):
    '''
    Sort a list of non-negative integers in place with least-significant-digit
    radix sort and return it.
    '''
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints, in order
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
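# Hedged usage sketch (added): LSD radix sort runs in O(d * (n + RADIX)) for
# n non-negative integers of at most d digits, and sorts the list in place.
if __name__ == "__main__":
    sample = [170, 45, 75, 90, 802, 24, 2, 66]
    print(radix_sort(sample))  # [2, 24, 45, 66, 75, 90, 170, 802]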
'''simple docstring'''
def kinetic_energy(mass, velocity):
    '''
    Return the translational kinetic energy 0.5 * m * v^2 of a body.
    '''
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
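# Hedged worked example (added): KE = 0.5 * m * v**2, so a 10 kg body moving
# at 10 m/s carries 0.5 * 10 * 10**2 = 500 J.
if __name__ == "__main__":
    assert kinetic_energy(10, 10) == 500.0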
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=16 , __A=36 , __A=6 , __A=6 , __A=6 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =embedding_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_hidden_groups
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =scope
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> List[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
_lowerCAmelCase =AlbertModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A )
_lowerCAmelCase =model(__A , token_type_ids=__A )
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Any:
_lowerCAmelCase =AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =self.num_choices
_lowerCAmelCase =AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ) -> List[str]:
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
lowercase : Dict = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase : Union[str, Any] = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : int = True
def UpperCamelCase__ ( self , __A , __A , __A=False ) -> int:
_lowerCAmelCase =super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
_lowerCAmelCase =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
_lowerCAmelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =AlbertModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase =type
self.model_tester.create_and_check_model(*__A )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =AlbertModel.from_pretrained('albert-base-v2' )
_lowerCAmelCase =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase =model(__A , attention_mask=__A )[0]
_lowerCAmelCase =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
_lowerCAmelCase =torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    '''
    Return the peak of a unimodal (strictly increasing, then strictly
    decreasing) list in O(log n) time by divide and conquer.
    '''
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if the middle element is a peak, we are done
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if still increasing, recurse on the right half
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # otherwise decreasing: recurse on the left half
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
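# Hedged usage sketch (added): the input must rise and then fall exactly once.
if __name__ == "__main__":
    assert peak([1, 3, 4, 5, 4, 3]) == 5
    assert peak([1, 10, 9, 8, 7, 6]) == 10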
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( *__A , **__A ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@require_torch
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__A ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
] , )
@require_tf
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(__A ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
] , )
@slow
@require_torch
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(__A ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(__A ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast ( PreTrainedTokenizerFast):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , token_ids_0 , token_ids_1=None ) -> List[int]:
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1:
        output += token_ids_1 + [self.sep_token_id]
    return output
def UpperCamelCase__ ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def UpperCamelCase__ ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
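# Hedged illustration (added): for a pair (A, B) the two methods above build
# the standard BERT layout
#
#     tokens:          [CLS] a1 a2 [SEP] b1 b2 [SEP]
#     token_type_ids:    0   0  0    0   1  1    1
#
# and for a single sequence A they return [CLS] A [SEP] with all-zero types.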
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    '''
    Decorator flagging a callable as experimental: each call emits a
    UserWarning before delegating to the wrapped function.
    '''
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
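# Hedged usage sketch (added; uses the `experimental` name given above):
@experimental
def new_api() -> str:
    '''Example function flagged as experimental.'''
    return "ok"

if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always' )
        assert new_api() == "ok"
    assert any('experimental' in str(w.message ) for w in caught )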
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor ( ProcessorMixin):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> BatchEncoding:
    if text is None and images is None:
        raise ValueError('You have to specify either text or images. Both cannot be none.' )
    if text is not None:
        encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
    if images is not None:
        image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
    if text is not None and images is not None:
        encoding['pixel_values'] = image_features.pixel_values
        return encoding
    elif text is not None:
        return encoding
    else:
        return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
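# Hedged usage sketch (added): a typical round trip through the processor,
# assuming the usual pretrained checkpoint name (illustrative):
#
#     from transformers import CLIPProcessor
#     from PIL import Image
#     processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#     inputs = processor(text=['a photo of a cat'], images=Image.open('cat.png'),
#                        return_tensors='pt')
#     # -> BatchEncoding with input_ids, attention_mask and pixel_values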
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
lowercase_ = parser.parse_args()
lowercase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
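# Hedged usage note (added): a typical invocation for a Stable Diffusion v1.x
# checkpoint; the script and file names below are illustrative, not from the
# source:
#
#     python convert_from_ckpt.py \
#         --checkpoint_path ./sd-v1-4.ckpt \
#         --original_config_file ./v1-inference.yaml \
#         --scheduler_type pndm \
#         --dump_path ./sd-v1-4-diffusers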
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder ( ModelMixin , ConfigMixin):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class DecoderLayer ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class TaLayerSelfAttentionCond ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaLayerCrossAttention ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class TaLayerFFCond ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaDenseGatedActDense ( nn.Module):
    """T5-style gated feed-forward: a GELU gate multiplies a parallel linear branch."""
    def __init__( self , d_model , d_ff , dropout_rate ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> torch.Tensor:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
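# Hedged note (added): the block above is T5's "gated-gelu" feed-forward,
# out = wo(dropout(gelu(wi_0(x)) * wi_1(x))): the GELU branch gates a
# parallel linear branch elementwise before the output projection.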
class TaLayerNorm ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
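# Hedged illustration (added, not part of the original module): TaLayerNorm
# above is RMS layer normalization (https://arxiv.org/abs/1910.07467): scale
# by the inverse root-mean-square over the last axis, with no mean
# subtraction and no bias. A minimal functional sketch of the same math:
def _rms_norm_sketch(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # accumulate the variance in fp32 for numerical stability, as above
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)
    return weight * x.to(weight.dtype)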
class NewGELUActivation ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class TaFiLMLayer ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
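# Hedged note (added): TaFiLMLayer above implements feature-wise linear
# modulation (FiLM, https://arxiv.org/abs/1709.07871): a conditioning
# embedding is projected to per-channel (scale, shift) pairs and applied as
# x * (1 + scale) + shift. A standalone functional sketch of the same idea:
def _film_sketch(x: torch.Tensor, scale: torch.Tensor, shift: torch.Tensor) -> torch.Tensor:
    # scale and shift broadcast over the batch and sequence dimensions
    return x * (1 + scale) + shift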
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def train_command_factory(args: Namespace ):
    '''
    Factory returning a `TrainCommand` from parsed CLI arguments.
    '''
    return TrainCommand(args )
class TrainCommand ( BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class BlipaVisionConfig ( PretrainedConfig):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class BlipaQFormerConfig ( PretrainedConfig):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class BlipaConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Blip2VisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
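# --- Added illustrative sketch. This module uses relative imports, so treat it
# as a usage sketch inside the transformers package rather than a standalone
# script; all values shown are the class defaults.
if __name__ == "__main__":
    vision = BlipaVisionConfig()
    qformer = BlipaQFormerConfig()
    text = CONFIG_MAPPING['opt']()
    config = BlipaConfig.from_vision_qformer_text_configs(vision, qformer, text)
    # the Q-Former is wired to the vision tower's width automatically
    print(config.num_query_tokens, config.qformer_config.encoder_hidden_size)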
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
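# --- Added illustrative sketch of the guarded-import pattern used above ---
# A standalone approximation of is_torch_available()/is_tf_available(): probe
# for an optional backend with a cheap spec lookup instead of importing it.
if __name__ == "__main__":
    import importlib.util

    def backend_available(name):
        # True when the module can be located without actually importing it
        return importlib.util.find_spec(name) is not None

    for backend in ("torch", "tensorflow"):
        print(backend, "available:", backend_available(backend))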
| 58
| 1
|
'''simple docstring'''
def is_arithmetic_series(series):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non-empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series):
    '''simple docstring'''
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non-empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
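# Added quick standalone check of the two helpers above (illustrative):
if __name__ == "__main__":
    print(is_arithmetic_series([2, 4, 6, 8]))  # True
    print(is_arithmetic_series([2, 4, 7]))     # False
    print(arithmetic_mean([2, 4, 6]))          # 4.0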
| 58
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    '''simple docstring'''
    parameter_file = os.path.join(args.tf_model_dir, 'parameters.json')
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.")
    if not args.output.endswith('.pt'):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
                _lowerCAmelCase ='model.sqout.%d.weight' % (player * 2)  # feeds into an nn.Sequential with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
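# Added example invocation (illustrative; the paths are placeholders and the
# script filename is an assumption):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/tf_checkpoint --output /path/to/model
#
# The converter expects a `parameters.json` inside --tf_model_dir and appends
# `.pt` to --output when the suffix is missing.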
| 58
| 1
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
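# Added standalone demo of the checkpoint-extraction regex (illustrative; note
# that importing this module itself requires running from the repo root, like
# the check does):
if __name__ == "__main__":
    sample = 'See [bert-base-uncased](https://huggingface.co/bert-base-uncased).'
    print(_re_checkpoint.findall(sample))
    # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]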
| 58
|
'''simple docstring'''
def solution(power = 1_0_0_0):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 1_0, n // 1_0
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
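# Added sanity check (illustrative): the digit-by-digit loop must agree with
# the direct computation on the classic power-of-two case.
if __name__ == "__main__":
    assert solution(1_0_0_0) == sum(int(digit) for digit in str(2**1_0_0_0))  # 1366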
| 58
| 1
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase_ = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
lowercase_ = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
lowercase_ = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    '''simple docstring'''
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    '''simple docstring'''
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred['prediction']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='macro')
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['prediction'] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='macro')
        elif self.config_name == "record":
            dataset = [
                {
                    'qas': [
                        {'id': ref['idx']['query'], 'answers': [{'text': ans} for ans in ref['answers']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['idx']['query']: pred['prediction_text'] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]')
| 58
|
'''simple docstring'''
def check_cycle(graph):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)
def depth_first_search(graph, vertex, visited, rec_stk):
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
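# Added standalone example (illustrative): a 3-cycle is detected, a DAG is not.
if __name__ == "__main__":
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1, 2], 1: [2], 2: []}
    print(check_cycle(cyclic))   # True
    print(check_cycle(acyclic))  # False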
| 58
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    '''simple docstring'''
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='dataset', revision=revision)
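# Added illustrative call (builds a URL string only; `user/my_dataset` is a
# placeholder repo id, no network access happens):
if __name__ == "__main__":
    print(hf_hub_url('user/my_dataset', 'data/train.csv', revision='main'))
    # https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv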
| 58
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            ' function.')
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode='max', save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"val_{metric}", mode='min' if 'loss' in metric else 'max', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, 'test')
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
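# Added illustrative use of the callback factories above (constructs the
# objects only; assumes pytorch_lightning is installed):
if __name__ == "__main__":
    ckpt = get_checkpoint_callback('/tmp/ckpts', 'bleu')
    early = get_early_stopping_callback('loss', patience=3)
    print(ckpt.monitor, early.mode)  # val_bleu min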
| 58
|
'''simple docstring'''
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message):
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message):
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split())
def main():
    '''simple docstring'''
    message = 'Morse code here!'
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 58
| 1
|
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += [key]
        setattr(func, 'handle_key', handle)
        return func
    return decorator
def mark_multiple(*keys):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, 'handle_key', [])
        handle += keys
        setattr(func, 'handle_key', handle)
        return func
    return decorator
class KeyHandler(type):
    """simple docstring"""
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, 'key_handler'):
            setattr(new_cls, 'key_handler', {})
        setattr(new_cls, 'handle_input', KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, 'handle_key', [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    '''simple docstring'''
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
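# Added standalone sketch of the registration pattern (illustrative; it only
# inspects the attribute set by the decorator, with no terminal interaction):
if __name__ == "__main__":
    @mark('q')
    def quit_handler():
        return 'quit'

    print(quit_handler.handle_key)  # ['q']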
| 58
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'data2vec-text'
    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """simple docstring"""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 58
| 1
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main():
    '''simple docstring'''
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 58
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=4 , ) -> str:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_attention_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_choices
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_attention_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase =True
_lowerCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Any = True
lowercase : Any = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCamelCase__ ( self ) -> int:
for model_class_name in self.all_model_classes:
_lowerCAmelCase =model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__A )
_lowerCAmelCase =model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__A )
_lowerCAmelCase =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_lowerCAmelCase =model(__A )[0]
_lowerCAmelCase =[1, 11, 5_0265]
self.assertEqual(list(output.shape ) , __A )
# compare the actual values for a slice.
_lowerCAmelCase =np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=__A )
_lowerCAmelCase =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
_lowerCAmelCase =model(__A )[0]
# compare the actual values for a slice.
_lowerCAmelCase =np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
| 58
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    """simple docstring"""
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 58
| 1
|
'''simple docstring'''
def solution(n = 1_0):
    '''simple docstring'''
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input')
    modulus = 1_0**n
    number = 2_8_4_3_3 * (pow(2, 7_8_3_0_4_5_7, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(10) = }')
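# Added sanity check (illustrative): three-argument pow is the modular shortcut
# that keeps solution() cheap; verify it against the direct power on a small case.
if __name__ == "__main__":
    assert pow(2, 20, 10**5) == (2**20) % 10**5
    print(solution(10))  # last ten digits of 28433 * 2**7830457 + 1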
| 58
|
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins(root):
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1
    def count_coins(node) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data
    if count_nodes(root) != count_coins(root):
        raise ValueError('The number of nodes should be the same as the number of coins')
    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move, excess)
    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
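# Added tiny example (illustrative): the root holds all three coins, so one
# coin must move to each empty leaf, giving two moves in total.
if __name__ == "__main__":
    tree = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coins(tree))  # 2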
| 58
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 1
|
'''simple docstring'''
import requests
_NEWS_API = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def fetch_bbc_news(bbc_news_api_key):
    '''simple docstring'''
    # fetch the list of top articles in JSON format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['articles'], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 58
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    """simple docstring"""

    mode = 'sequence-classification'

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none'
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser


def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 58
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 58
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph:
    """Data structure for Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[Any]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node, v_node, weight) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size, u_node, v_node) -> None:
        # Attach the smaller component to the larger one (union by size).
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """
    >>> g = Graph(3)
    >>> g.add_edge(0, 1, 1)
    >>> g.add_edge(0, 2, 2)
    >>> g.add_edge(1, 2, 3)
    >>> g.boruvka()
    Added edge [0 - 1]
    Added weight: 1
    <BLANKLINE>
    Added edge [0 - 2]
    Added weight: 2
    <BLANKLINE>
    The total weight of the minimal spanning tree is: 3
    """
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops, sep='\n')
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.')
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
lowercase_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 58
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    '''simple docstring'''

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
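
# Note (added): `brightness` simplifies to `c + level`. For 8-bit images, PIL's
# `Image.point` builds a lookup table from the function and clips results to the
# valid 0-255 range, so e.g. a pixel value of 200 with level=100 saturates at 255.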
| 58
| 1
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    '''simple docstring'''
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
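
# Worked example (added): an ideal polarizer at 60 degrees to the incident polarization
# transmits cos^2(60 deg) = 1/4 of the intensity, so malus_law(100.0, 60) is approximately
# 25.0 (up to floating-point rounding in math.radians and math.cos).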
| 58
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def test_config_update_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}'
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f" {', '.join(keys_with_defaults)}.")
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 1
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """simple docstring"""

    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n'
    'also takes care of multi-layer training. ',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """simple docstring"""

    config_class = RobertaConfig
    base_model_prefix = 'roberta'

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 58
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
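
# Usage sketch (added): RADIX is 10, so each pass buckets the values by one decimal
# digit, least-significant digit first. Sorting is in place and the list is also returned.
#
# >>> radix_sort([170, 45, 75, 90, 2, 802, 24, 66])
# [2, 24, 45, 66, 75, 90, 170, 802]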
| 58
| 1
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''

# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')


def find_backend(line):
    '''simple docstring'''
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return '_and_'.join(backends)
def parse_init(init_file):
    '''simple docstring'''
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    '''simple docstring'''

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
def check_all_inits():
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\[\"([^\"]*)\"\]', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(f'- {module}' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 58
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    '''simple docstring'''
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
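
# Worked example (added): for water, density is about 998 kg/m^3 and the bulk modulus
# is about 2.15e9 Pa, giving sqrt(2.15e9 / 998) which is roughly 1468 m/s, close to the
# measured speed of sound in water.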
| 58
|
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    '''simple docstring'''
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
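
# Usage sketch (added): the list must rise to a single peak and then fall for the
# divide-and-conquer recursion to be valid; each step halves the search range.
#
# >>> peak([1, 2, 3, 4, 5, 4, 3, 2, 1])
# 5
# >>> peak([1, 10, 9, 8, 7, 6])
# 10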
| 58
| 1
|
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """simple docstring"""

    def put(self, value):
        raise NotImplementedError()

    def end(self):
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """simple docstring"""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1')
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n'):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(' ') + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end='' if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    """simple docstring"""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
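
# Usage sketch (added; follows the standard `transformers` streaming pattern, with
# model and checkpoint names as placeholders, not part of this module):
#
# from threading import Thread
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tok = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tok(["A short story:"], return_tensors="pt")
# streamer = TextIteratorStreamer(tok, skip_prompt=True)
# thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
# thread.start()
# for new_text in streamer:  # yields decoded chunks as they are generated
#     print(new_text, end="")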
| 58
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 58
| 1
|
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 58
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
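# --- Editor's sketch: typical use of the processor above via the Hugging Face hub, kept
# as comments since it downloads a checkpoint. The checkpoint name is the standard public
# CLIP checkpoint; the stand-in image avoids needing a file on disk.
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.new("RGB", (224, 224))
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#     sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']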
| 58
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase ={
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_lowerCAmelCase ={
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(__A ) , __A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__A ) , x.transpose() ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A ) , np.asarray(transpose(__A ) ) ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , np.asarray(transpose(__A , axes=(1, 2, 0) ) ) ) )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.reshape(__A , (4, 3) ) ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.reshape(__A , (12, 5) ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.asarray(reshape(__A , (4, 3) ) ) ) )
_lowerCAmelCase =np.random.randn(3 , 4 , 5 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.asarray(reshape(__A , (12, 5) ) ) ) )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__A ) , np.squeeze(__A ) ) )
_lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.squeeze(__A , axis=2 ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =np.random.randn(1 , 3 , 4 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
_lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =np.random.randn(1 , 3 , 4 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
_lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =np.random.randn(1 , 3 , 4 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A ) , np.asarray(squeeze(__A ) ) ) )
_lowerCAmelCase =np.random.randn(1 , 4 , 1 , 5 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.asarray(squeeze(__A , axis=2 ) ) ) )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.expand_dims(__A , axis=1 ) ) )
@require_torch
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =torch.tensor(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_tf
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =tf.constant(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_flax
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =np.random.randn(3 , 4 )
_lowerCAmelCase =jnp.array(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.asarray(expand_dims(__A , axis=1 ) ) ) )
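# --- Editor's sketch: the framework dispatch these tests exercise, in miniature. The
# transformers utilities inspect the tensor type and route to the matching backend so one
# `transpose` call covers NumPy, PyTorch, TensorFlow and JAX; this simplified version
# branches on the type's module name instead of the library's type checks.
def generic_transpose(array, axes=None):
    module = type(array).__module__
    if module.startswith("torch"):
        # mirrors the library's approach: .T when no axes are given, permute otherwise
        return array.T if axes is None else array.permute(*axes)
    if module.startswith("tensorflow"):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    # NumPy and JAX arrays both accept the np.transpose-style call (JAX is converted here).
    return np.asarray(array).transpose(axes)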
| 58
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
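# --- Editor's note: the encoder_decoder_mask helper above builds a cross-attention mask
# as an outer product, mask[b, q, k] = query_mask[b, q] * key_mask[b, k], then adds a
# broadcastable head axis with unsqueeze(-3). A quick shape check (editor's addition):
_query_mask = torch.ones(1, 3)          # (batch, query_len)
_key_mask = torch.tensor([[1.0, 0.0]])  # (batch, key_len)
_mask = torch.mul(_query_mask.unsqueeze(-1), _key_mask.unsqueeze(-2)).unsqueeze(-3)
assert _mask.shape == (1, 1, 3, 2)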
class DecoderLayer( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaLayerCrossAttention( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class TaLayerFFCond( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class TaDenseGatedActDense( nn.Module):
"""simple docstring"""
    def __init__( self , d_model , d_ff , dropout_rate ) -> None:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> torch.Tensor:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module):
"""simple docstring"""
    def __init__( self , hidden_size , eps=1E-6 ) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> torch.Tensor:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
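# --- Editor's note: the layer above is RMSNorm (https://arxiv.org/abs/1910.07467):
# y = w * x / sqrt(mean(x**2) + eps), with no mean-centering and no bias term. The same
# computation as a plain function, for reference (name is ours, not the original file's):
def _rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1E-6) -> torch.Tensor:
    variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
    return weight * (x * torch.rsqrt(variance + eps)).to(weight.dtype)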
class NewGELUActivation( nn.Module):
"""simple docstring"""
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
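# --- Editor's note: the tanh expression above is the "new GELU" approximation used in
# the GPT-2/T5 codebases. Recent PyTorch (>= 1.12) exposes the same curve, so it can be
# sanity-checked against torch.nn.functional.gelu(..., approximate="tanh"):
_x = torch.linspace(-3.0, 3.0, steps=7)
_ref = 0.5 * _x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (_x + 0.044_715 * torch.pow(_x, 3.0))))
assert torch.allclose(_ref, torch.nn.functional.gelu(_x, approximate="tanh"), atol=1E-6)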
class TaFiLMLayer( nn.Module):
"""simple docstring"""
    def __init__( self , in_features , out_features ) -> None:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> torch.Tensor:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
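# --- Editor's sketch: FiLM conditioning as implemented above — project a conditioning
# embedding to one (scale, shift) pair per feature and apply x * (1 + scale) + shift.
# Kept as comments; shapes are illustrative:
#
#     film = TaFiLMLayer(in_features=3072, out_features=768)   # d_model * 4 -> d_model
#     x = torch.randn(2, 10, 768)        # (batch, seq, d_model)
#     cond = torch.randn(2, 1, 3072)     # broadcasts over the sequence axis
#     film(x, cond).shape                # torch.Size([2, 10, 768])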
| 58
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory( args ):
    '''simple docstring'''
    return TrainCommand(args )
class TrainCommand( BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ) -> None:
        train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args ) -> None:
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        raise NotImplementedError
    def run_tf( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
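# --- Editor's note (hedged): invocation sketch, assuming this command is registered on
# the `transformers-cli` entry point as in the releases this file comes from:
#
#   transformers-cli train \
#       --train_data train.csv \
#       --column_label 0 --column_text 1 --column_id 2 \
#       --task text_classification \
#       --model bert-base-uncased \
#       --output ./trained_model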
| 58
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def _start_torch_memory_measurement( ):
    '''simple docstring'''
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
    def __init__( self , num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node , v_node , weight ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(v_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
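# --- Editor's sketch: exercising the class above (kept under its obfuscated name, the
# only class in this file) on a small connected graph; the MST for these edges is
# (2-3, 4) + (0-3, 5) + (0-1, 10) = 19.
def _demo_boruvka() -> None:
    g = SCREAMING_SNAKE_CASE(4)
    for u_node, v_node, weight in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
        g.add_edge(u_node, v_node, weight)
    g.boruvka()  # prints each added edge and the total weight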
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( a__ ):  # renamed so the __main__ block below can call it
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
    parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
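# --- Editor's note (hedged): illustrative invocation; the script filename and paths are
# assumptions, not taken from this file:
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py \
#       --tf_model_dir /path/to/tf_checkpoint_dir \
#       --output gptsan_pytorch_model.pt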
| 58
| 1
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class OwlViTTextConfig( PretrainedConfig):
"""simple docstring"""
lowercase : Any = 'owlvit_text_model'
def __init__( self , __A=4_9408 , __A=512 , __A=2048 , __A=12 , __A=8 , __A=16 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.02 , __A=1.0 , __A=0 , __A=4_9406 , __A=4_9407 , **__A , ) -> Any:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_act
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
_lowerCAmelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class OwlViTVisionConfig( PretrainedConfig):
"""simple docstring"""
lowercase : Optional[int] = 'owlvit_vision_model'
def __init__( self , __A=768 , __A=3072 , __A=12 , __A=12 , __A=3 , __A=768 , __A=32 , __A="quick_gelu" , __A=1E-5 , __A=0.0 , __A=0.02 , __A=1.0 , **__A , ) -> Dict:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =initializer_factor
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('model_type' ) == "owlvit":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class OwlViTConfig( PretrainedConfig):
"""simple docstring"""
lowercase : Optional[int] = 'owlvit'
lowercase : Optional[int] = True
def __init__( self , __A=None , __A=None , __A=512 , __A=2.6_592 , __A=True , **__A , ) -> Optional[int]:
super().__init__(**__A )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' )
_lowerCAmelCase =OwlViTTextConfig(**__A )
_lowerCAmelCase =OwlViTVisionConfig(**__A )
_lowerCAmelCase =projection_dim
_lowerCAmelCase =logit_scale_init_value
_lowerCAmelCase =return_dict
_lowerCAmelCase =1.0
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
@classmethod
def UpperCamelCase__ ( cls , __A , __A , **__A ) -> Union[str, Any]:
_lowerCAmelCase ={}
_lowerCAmelCase =text_config
_lowerCAmelCase =vision_config
return cls.from_dict(__A , **__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
class OwlViTOnnxConfig( OnnxConfig):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-4
def UpperCamelCase__ ( self , __A , __A = -1 , __A = -1 , __A = None , ) -> Mapping[str, Any]:
_lowerCAmelCase =super().generate_dummy_inputs(
processor.tokenizer , batch_size=__A , seq_length=__A , framework=__A )
_lowerCAmelCase =super().generate_dummy_inputs(
processor.image_processor , batch_size=__A , framework=__A )
return {**text_input_dict, **image_input_dict}
@property
def UpperCamelCase__ ( self ) -> int:
return 14
| 58
|
'''simple docstring'''
def solution( power = 1_0_0_0 ):
    '''simple docstring'''
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 1_0, n // 1_0
    return r
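# --- Editor's note: quick sanity check — 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26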
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 1
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_lowerCAmelCase =Vector()
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__A ) , '(0,0,0,0,0,1)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(__A ) , 4 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2] )
_lowerCAmelCase =Vector([1, 2, 3, 4, 5] )
_lowerCAmelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_lowerCAmelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([2, -1, 4] ) # for test of dot product
_lowerCAmelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 2, 3] )
_lowerCAmelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __A , __A ) ) , '(3,4,7)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 0, 0, 0, 0, 0] )
_lowerCAmelCase =x.copy()
self.assertEqual(str(__A ) , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__A ) , '(0,1,0)' )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__A , __A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__A , __A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_lowerCAmelCase =Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(__A ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_lowerCAmelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def UpperCamelCase__ ( self ) -> None:
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 58
|
'''simple docstring'''
def check_cycle( graph ):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph , vertex , visited , rec_stk ):
    '''simple docstring'''
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
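# --- Editor's note: quick checks — 0 -> 1 -> 2 -> 0 contains a back edge, while the
# chain 0 -> 1 -> 2 does not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False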
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_dpt'''] = ['''DPTFeatureExtractor''']
    _import_structure['''image_processing_dpt'''] = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_dpt'''] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class BlipaVisionConfig( PretrainedConfig):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
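# Composition sketch (assumed usage, not part of the original file): building
# a Blip2Config from defaults shows the wiring performed in __init__ between
# the vision tower, the Q-Former, and the (decoder-only) OPT text model.
#
#     config = Blip2Config()
#     assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
#     assert config.text_config.model_type == "opt"
#     assert config.use_decoder_only_language_model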
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means clustering with TensorFlow 1.x graph-mode ops.
    'vectors' should be an n x k 2-D NumPy array; 'noofclusters' an int.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.sub was renamed to tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        # tf.initialize_all_variables is deprecated in favour of this op
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
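# Illustrative call (made-up data, not part of the original file): cluster six
# 2-D points into two groups; `vectors` must be an n x k array-like.
#
#     points = array([[0.0, 0.0], [0.1, 0.2], [0.2, 0.1],
#                     [5.0, 5.0], [5.1, 4.9], [4.9, 5.2]])
#     centroids, assignments = TFKMeansCluster(points, 2)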
"""Encode and decode messages using international Morse code."""
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/",
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
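# Round-trip sketch (assumed usage, mirroring main() above): decryption
# recovers the upper-cased input for any message built from supported symbols.
#
#     encrypt("SOS")          # -> "... --- ..."
#     decrypt("... --- ...")  # -> "SOS"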
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
                _lowerCAmelCase ='model.sqout.%d.weight' % (player * 2)  # enters an nn.Sequential with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
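# Example invocation (illustrative only; the script name and paths are
# assumptions, not taken from the original file):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py \
#         --tf_model_dir /path/to/tf_checkpoint_dir \
#         --output /path/to/converted_model.pt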
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
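# Usage sketch (an assumption for illustration, not part of the original file):
# for any task other than "multiple-choice" the exported ONNX graph declares
# batch and sequence as dynamic axes.
#
#     onnx_config = Data2VecTextOnnxConfig(Data2VecTextConfig())
#     # onnx_config.inputs == OrderedDict(
#     #     [("input_ids", {0: "batch", 1: "sequence"}),
#     #      ("attention_mask", {0: "batch", 1: "sequence"})]
#     # )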
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
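# Example invocation (illustrative data path; every flag used here is
# registered by the argument parser above):
#
#     transformers-cli train \
#         --train_data ./data/train.csv \
#         --task text_classification \
#         --model bert-base-uncased \
#         --output ./trained_pipeline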
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
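# Worked example of the hidden_size computation above, using the defaults:
# embed_dim=96 and depths=[2, 2, 6, 2] give 4 stages, so
# hidden_size = int(96 * 2 ** (4 - 1)) = 768 channels after the last stage.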
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    """Test cases for the 0/1 knapsack implementation."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase_ = get_tests_dir('''fixtures''')
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Union[str, Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained(__A )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__A , repo_id='test-feature-extractor' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained(__A )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__A , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
_lowerCAmelCase =CustomFeatureExtractor.from_pretrained(__A )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
_lowerCAmelCase =AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase_ = TypeVar('''KT''')
lowercase_ = TypeVar('''VT''')
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = "root" , __A = None ) -> Dict:
_lowerCAmelCase =key
_lowerCAmelCase =value
_lowerCAmelCase =[]
def __repr__( self ) -> str:
return F'''Node({self.key}: {self.value})'''
@property
def UpperCamelCase__ ( self ) -> int:
return len(self.forward )
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = 0.5 , __A = 16 ) -> int:
_lowerCAmelCase =Node[KT, VT]()
_lowerCAmelCase =0
_lowerCAmelCase =p
_lowerCAmelCase =max_level
def __str__( self ) -> str:
_lowerCAmelCase =list(self )
if len(__A ) == 0:
return F'''SkipList(level={self.level})'''
_lowerCAmelCase =max((len(str(__A ) ) for item in items) , default=4 )
_lowerCAmelCase =max(__A , 4 ) + 4
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
_lowerCAmelCase =node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(__A , '-' ) + '* ' * len(__A ) )
lines.append(' ' * label_size + '| ' * len(__A ) )
while len(node.forward ) != 0:
_lowerCAmelCase =node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(__A , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(__A ) )
_lowerCAmelCase =node.forward
lines.append('None'.ljust(__A ) + '* ' * len(__A ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(__A )
def __iter__( self ) -> int:
_lowerCAmelCase =self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_lowerCAmelCase =node.forward[0]
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCamelCase__ ( self , __A ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
_lowerCAmelCase =[]
_lowerCAmelCase =self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_lowerCAmelCase =node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__A )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCamelCase__ ( self , __A ) -> int:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
for i, update_node in enumerate(__A ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_lowerCAmelCase =node.forward[i]
else:
_lowerCAmelCase =update_node.forward[:i]
def UpperCamelCase__ ( self , __A , __A ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
_lowerCAmelCase =value
else:
_lowerCAmelCase =self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __A ):
update_vector.append(self.head )
_lowerCAmelCase =level
_lowerCAmelCase =Node(__A , __A )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__A )
else:
_lowerCAmelCase =new_node
def UpperCamelCase__ ( self , __A ) -> VT | None:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(__A )
if node is not None:
return node.value
return None
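# Complexity note (standard skip-list analysis, not stated in the original):
# with promotion probability p = 0.5, a search descends from the top level and
# scans O(1) nodes per level in expectation, giving O(log n) expected time for
# search, insert, and delete.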
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 1_2 )
skip_list.insert('Key3' , 4_1 )
skip_list.insert('Key4' , -1_9 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
assert len(a__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_0 )
skip_list.insert('Key1' , 1_2 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 1_0 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 1_0 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
if len(a__ ) != 4:
print()
assert len(a__ ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
assert skip_list.find('Some key' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key2' , 2_0 )
assert skip_list.find('Key2' ) == 2_0
skip_list.insert('Some Key' , 1_0 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 1_3 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 1_0
assert skip_list.find('V' ) == 1_3
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 1_4
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4_2 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('X' )
def traverse_keys(a__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(a__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def UpperCamelCase__ ( ):
'''simple docstring'''
def is_sorted(a__ ):
return all(next_item >= item for item, next_item in zip(a__ , lst[1:] ) )
_lowerCAmelCase =SkipList()
for i in range(1_0 ):
skip_list.insert(a__ , a__ )
assert is_sorted(list(a__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(a__ ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(a__ ) )
def UpperCamelCase__ ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( BaseImageProcessor):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , do_rescale = True , rescale_factor = 1 / 255 , do_pad = True , pad_size = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image , size , data_format = None ) -> np.ndarray:
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=data_format )
    def preprocess( self , images , do_rescale = None , rescale_factor = None , do_pad = None , pad_size = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> Dict:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
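# A NumPy-only sketch of the pad-to-multiple step implemented above: the
# bottom/right padding amounts make each spatial dimension a multiple of `size`.
# The helper name and channels-first layout are assumptions for illustration.
import numpy as np
def _pad_to_multiple(image, size=8):
    old_height, old_width = image.shape[-2], image.shape[-1]
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return np.pad(image, ((0, 0), (0, pad_height), (0, pad_width)), mode='symmetric')
assert _pad_to_multiple(np.zeros((3, 13, 21)), 8).shape == (3, 16, 24)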
| 58
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer( BaseTransformer):
    """simple docstring"""
    mode = 'sequence-classification'
    def __init__( self , hparams ) -> List[Any]:
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
    def forward( self , **inputs ) -> Any:
        return self.model(**inputs )
    def training_step( self , batch , batch_idx ) -> Union[str, Any]:
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs )
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data( self ) -> Any:
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode )
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s' , cached_features_file )
            else:
                logger.info('Creating features from dataset file at %s' , args.data_dir )
                examples = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir )
                )
                features = convert_examples_to_features(
                    examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('Saving features into cached file %s' , cached_features_file )
                torch.save(features , cached_features_file )
    def get_dataloader( self , mode , batch_size , shuffle = False ) -> DataLoader:
        mode = 'dev' if mode == 'test' else mode
        cached_features_file = self._feature_file(mode )
        logger.info('Loading features from cached file %s' , cached_features_file )
        features = torch.load(cached_features_file )
        all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
    def validation_step( self , batch , batch_idx ) -> List[str]:
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
        outputs = self(**inputs )
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end( self , outputs ) -> tuple:
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds )
        out_label_ids = np.concatenate([x['target'] for x in outputs] , axis=0 )
        out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
        preds_list = [[] for _ in range(out_label_ids.shape[0] )]
        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
        ret = dict(results.items() )
        ret['log'] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end( self , outputs ) -> dict:
        ret, preds, targets = self._eval_end(outputs )
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end( self , outputs ) -> dict:
        ret, predictions, targets = self._eval_end(outputs )
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args( parser , root_dir ) -> Any:
        BaseTransformer.add_model_specific_args(parser , root_dir )
        parser.add_argument(
            '--max_seq_length' , default=128 , type=int , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--task' , default='' , type=str , required=True , help='The GLUE task to run' , )
        parser.add_argument(
            '--gpus' , default=0 , type=int , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
        parser.add_argument(
            '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
        return parser
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser , os.getcwd() )
    parser = GLUETransformer.add_model_specific_args(parser , os.getcwd() )
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )
    model = GLUETransformer(args )
    trainer = generic_train(model , args )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=True ) )
        model = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(model )
if __name__ == "__main__":
main()
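# A standalone sketch of the batch-to-model-inputs mapping used by the training
# and validation steps above: token_type_ids are only passed for model families
# that consume them. The tensors below are dummies.
import torch
def _batch_to_inputs(batch, model_type):
    inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
    if model_type not in ('distilbert', 'bart'):
        inputs['token_type_ids'] = batch[2] if model_type in ('bert', 'xlnet', 'albert') else None
    return inputs
_dummy = [torch.ones(2, 8, dtype=torch.long)] * 4
assert 'token_type_ids' not in _batch_to_inputs(_dummy, 'distilbert')
assert _batch_to_inputs(_dummy, 'roberta')['token_type_ids'] is None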
| 58
| 1
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles( path , articles ):
    '''simple docstring'''
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE ( TestCasePlus):
"""simple docstring"""
    def run_eval_tester( self , model ) -> Any:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        articles = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(input_file_name , articles )
        score_path = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(sys , 'argv' , testargs ):
            run_generate()
            assert Path(output_file_name ).exists()
            # os.remove(Path(output_file_name))
    def test_run_eval( self ) -> str:
        self.run_eval_tester(T5_TINY )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def test_run_eval_slow( self , model ) -> Dict:
        self.run_eval_tester(model )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def test_run_eval_search( self , model ) -> List[Any]:
        input_file_name = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        output_file_name = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        text = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir() )
        score_path = str(tmp_dir / 'scores.json' )
        reference_path = str(tmp_dir / 'val.target' )
        _dump_articles(input_file_name , text['en'] )
        _dump_articles(reference_path , text['de'] )
        task = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        testargs = F'''
            run_eval_search.py
            {model}
            {str(input_file_name )}
            {str(output_file_name )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(sys , 'argv' , testargs ):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [' num_beams | length_penalty', model, 'Best score args']
            un_expected_strings = ['Info']
            if "translation" in task:
                expected_strings.append('bleu' )
            else:
                expected_strings.extend(ROUGE_KEYS )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name ).exists()
            os.remove(Path(output_file_name ) )
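# A hypothetical sketch of how a "--search" string such as the one passed above
# ("num_beams=1:2 length_penalty=0.9:1.0") can expand into a grid of generation
# settings; run_eval_search's real parser may differ in details.
from itertools import product
def _parse_search(search):
    keys, value_lists = [], []
    for clause in search.split():
        key, values = clause.split('=')
        keys.append(key)
        value_lists.append(values.split(':'))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]
_grid = _parse_search('num_beams=1:2 length_penalty=0.9:1.0')
assert len(_grid) == 4 and _grid[0] == {'num_beams': '1', 'length_penalty': '0.9'}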
| 58
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Graph :
    """simple docstring"""
    def __init__( self , num_of_nodes ) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node , v_node , weight ) -> None:
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node ) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node ) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size , u_node , v_node ) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
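# A self-contained, iterative variant of the recursive find_component above:
# follow parent pointers in the component map until a node is its own parent
# (the representative of its component).
def _find_component(parent, node):
    while parent[node] != node:
        node = parent[node]
    return node
assert _find_component({0: 0, 1: 0, 2: 1, 3: 3}, 2) == 0
assert _find_component({0: 0, 1: 0, 2: 1, 3: 3}, 3) == 3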
| 58
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    '''simple docstring'''
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict( orig_state_dict , config ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
_lowerCAmelCase =val[:dim, :]
_lowerCAmelCase =val[dim : dim * 2, :]
_lowerCAmelCase =val[-dim:, :]
else:
_lowerCAmelCase =val[:dim]
_lowerCAmelCase =val[dim : dim * 2]
_lowerCAmelCase =val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
_lowerCAmelCase =val[:dim, :]
_lowerCAmelCase =val[
dim : dim * 2, :
]
_lowerCAmelCase =val[-dim:, :]
else:
_lowerCAmelCase =val[:dim]
_lowerCAmelCase =val[dim : dim * 2]
_lowerCAmelCase =val[-dim:]
else:
            new_name = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
                orig_state_dict[new_name] = val.squeeze_()
else:
                orig_state_dict[new_name] = val
return orig_state_dict
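# A minimal sketch of the qkv-splitting step in convert_state_dict above: a
# fused (3 * dim, dim) projection weight is cut into separate query/key/value
# matrices. The key names below are illustrative, not GroupViT's real ones.
import torch
def _split_qkv(weight, dim):
    return {
        'q_proj.weight': weight[:dim, :],
        'k_proj.weight': weight[dim : dim * 2, :],
        'v_proj.weight': weight[-dim:, :],
    }
_parts = _split_qkv(torch.arange(24.0).reshape(6, 2), 2)
assert all(t.shape == (2, 2) for t in _parts.values())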
def prepare_img( ):
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 58
|
'''simple docstring'''
from PIL import Image
def change_brightness( img , level ):
    '''simple docstring'''
    def brightness( c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('''image_data/lena_brightness.png''', format='''png''')
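# Worked example of the formula in brightness(): with level = 100 a pixel value
# c maps to 128 + 100 + (c - 128) = c + 100; Image.point then clamps results to
# the 0..255 range for 8-bit modes, so a value of 200 would saturate at 255.
assert 128 + 100 + (72 - 128) == 172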
| 58
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def UpperCamelCase__ ( self ) -> Tuple:
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        check_copies.TRANSFORMER_PATH = 'src/transformers'
        shutil.rmtree(self.transformer_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ) -> Optional[Any]:
        code = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            expected = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=119 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.transformer_dir , 'new_code.py' )
        with open(fname , 'w' , newline='\n' ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , 'r' ) as f:
                self.assertTrue(f.read() , expected )
    def UpperCamelCase__ ( self ) -> Optional[Any]:
        code = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        self.assertEqual(code , REFERENCE_CODE )
    def UpperCamelCase__ ( self ) -> List[str]:
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('Bert' , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , REFERENCE_CODE , overwrite_result=re.sub('Bert' , 'TestModel' , REFERENCE_CODE ) , )
def UpperCamelCase__ ( self ) -> Optional[Any]:
        localized_readme = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , localized_md_list , localized_readme['format_model_list'] )
        self.assertFalse(num_models_equal )
        self.assertEqual(converted_md_list , converted_md_list_sample )
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list , converted_md_list , localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal )
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list , link_unchanged_md_list , localized_readme['format_model_list'] )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list , converted_md_list_sample )
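# A minimal illustration of the rename-based copy check exercised above: the
# reference code is transformed with re.sub and compared against the renamed
# copy. The snippet below is a toy stand-in for the real modeling code.
_reference = 'class BertLMPredictionHead:\n    pass\n'
assert re.sub('Bert', 'TestModel', _reference) == 'class TestModelLMPredictionHead:\n    pass\n'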
| 58
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
    def UpperCamelCase__ ( cls ) -> Optional[Any]:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
    def UpperCamelCase__ ( self ) -> str:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('test-config' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained(F'''{USER}/test-config''' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
    def UpperCamelCase__ ( self ) -> Dict:
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
    def UpperCamelCase__ ( self ) -> List[str]:
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
        self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
    def UpperCamelCase__ ( self ) -> List[Any]:
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type' )
    def UpperCamelCase__ ( self ) -> List[str]:
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F''' {', '.join(keys_with_defaults )}.''' )
    def UpperCamelCase__ ( self ) -> Optional[int]:
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
        self.assertIsNotNone(config )
    def UpperCamelCase__ ( self ) -> List[str]:
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
            mock_head.assert_called()
    def UpperCamelCase__ ( self ) -> Optional[int]:
        # This test is for deprecated behavior and can be removed in v5
        config = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def UpperCamelCase__ ( self ) -> Any:
        configuration = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , 'config.4.0.0.json' ) , 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , 'config.4.0.0.json' ) , os.path.join(tmp_dir , 'config.42.0.0.json' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def UpperCamelCase__ ( self ) -> Any:
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
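# A sketch of the versioned-configuration selection tested above: among
# config.X.Y.Z.json candidates, use the newest file whose version does not
# exceed the installed library version. The helper below is hypothetical.
from packaging import version
def _pick_config_file(candidates, current):
    def _v(name):
        return version.parse(name[len('config.'):-len('.json')])
    usable = [c for c in candidates if _v(c) <= version.parse(current)]
    return max(usable, key=_v) if usable else 'config.json'
assert _pick_config_file(['config.4.0.0.json', 'config.42.0.0.json'], '4.31.0') == 'config.4.0.0.json'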
| 58
| 1
|
'''simple docstring'''
class OverFlowError( Exception):
    """simple docstring"""
    pass
class UnderFlowError( Exception):
    """simple docstring"""
    pass
class FixedPriorityQueue :
    """simple docstring"""
    def __init__( self ) -> None:
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority , data ) -> None:
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
    def __str__( self ) -> str:
        return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    """simple docstring"""
    def __init__( self ) -> None:
        self.queue = []
    def enqueue( self , data ) -> None:
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ) -> int:
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ) -> str:
        return str(self.queue )
def fixed_priority_queue( ):
    '''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue( ):
    '''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 58
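# For comparison, the element-priority behavior with the standard-library heap:
# O(log n) enqueue/dequeue instead of the O(n) min()/remove() scan above.
import heapq
_heap = []
for _item in (10, 70, 1, 5):
    heapq.heappush(_heap, _item)
assert [heapq.heappop(_heap) for _ in range(4)] == [1, 5, 10, 70]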
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort( list_of_ints ):
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
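# Worked example: least-significant-digit passes leave the list fully sorted.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]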
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    '''simple docstring'''
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LongformerTokenizer( PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> int:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size( self ) -> Any:
        return len(self.encoder )
    def get_vocab( self ) -> int:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> List[str]:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ) -> int:
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ) -> int:
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> Tuple:
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ) -> int:
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ) -> List[Any]:
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
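# A minimal sketch of one BPE merge step as performed in the bpe() method above:
# the lowest-ranked adjacent pair is the next candidate to fuse. The ranks below
# are made up for illustration.
_ranks = {('l', 'o'): 0, ('o', 'w'): 1}
_word = ('l', 'o', 'w')
assert min(get_pairs(_word), key=lambda pair: _ranks.get(pair, float('inf'))) == ('l', 'o')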
| 58
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 1
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return float((preds == labels).mean() )
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =simple_accuracy(a__ , a__ )
_lowerCAmelCase =float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =float(pearsonr(a__ , a__ )[0] )
_lowerCAmelCase =float(spearmanr(a__ , a__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> int:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def UpperCamelCase__ ( self , __A , __A ) -> str:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__A , __A )}
elif self.config_name == "stsb":
return pearson_and_spearman(__A , __A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__A , __A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 58
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
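# Usage sketch (assumption: the divide-and-conquer helper above is reachable
# under the name `peak`, matching its own recursive calls). For a unimodal
# list the middle triple either contains the peak or points at the half that
# does, so each call halves the input:
#
#   >>> peak([1, 3, 5, 7, 6, 4, 2])
#   7
#   >>> peak([2, 4, 6, 8, 10, 9])
#   10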
| 58
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = 'vit'
def __init__( self , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.0 , __A=0.0 , __A=0.02 , __A=1E-12 , __A=224 , __A=16 , __A=3 , __A=True , __A=16 , **__A , ) -> Tuple:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =qkv_bias
_lowerCAmelCase =encoder_stride
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[Any] = version.parse('1.11')
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-4
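# Usage sketch (assuming the first class above corresponds to the public
# `transformers.ViTConfig`):
#
#   from transformers import ViTConfig
#   config = ViTConfig()                                         # ViT-Base defaults
#   n_patches = (config.image_size // config.patch_size) ** 2    # (224 // 16) ** 2 == 196
#   # the encoder therefore sees n_patches + 1 == 197 tokens (patches + [CLS])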
| 58
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
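# Sketch of the BERT-style special-token layout the two methods above produce:
#
#   single sequence: [CLS] A [SEP]           -> token_type_ids are all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]   -> 0s over [CLS] A [SEP], 1s over B [SEP]
#
# e.g. with len(A) == 2 and len(B) == 3 the type ids come out as
# [0, 0, 0, 0, 1, 1, 1, 1].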
| 58
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
if openai_config_file == "":
_lowerCAmelCase =OpenAIGPTConfig()
else:
_lowerCAmelCase =OpenAIGPTConfig.from_json_file(a__ )
_lowerCAmelCase =OpenAIGPTModel(a__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a__ , a__ , a__ )
# Save pytorch-model
_lowerCAmelCase =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCAmelCase =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , a__ )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
lowercase_ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
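# Example invocation (script name and paths are illustrative only):
#
#   python convert_openai_gpt_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-pt
#
# Leaving --openai_config_file at its empty default makes the script fall back
# to a fresh OpenAIGPTConfig, per the empty-string check at the top.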
| 58
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
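# Usage sketch (assuming the class above behaves like the public
# `transformers.CLIPProcessor`; the model name and image path are illustrative):
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
#   inputs = processor(text=['a photo of a cat'], images=Image.open('cat.png'),
#                      return_tensors='pt', padding=True)
#   # `inputs` merges the tokenizer's input_ids with the image processor's
#   # pixel_values -- exactly the both-present branch of __call__ above.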
| 58
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 58
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
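# FiLM in one line -- what the final layer above computes: the conditioning
# embedding is projected to (scale, shift) and applied feature-wise. A minimal
# sketch with plain tensors (shapes are illustrative):
#
#   import torch
#   x = torch.randn(2, 10, 64)              # (batch, seq, d_model)
#   scale_shift = torch.randn(2, 1, 128)    # projection of the conditioning emb
#   scale, shift = torch.chunk(scale_shift, 2, -1)
#   x = x * (1 + scale) + shift             # broadcasts across the sequence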
| 58
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'gpt_neox'
def __init__( self , __A=5_0432 , __A=6144 , __A=44 , __A=64 , __A=2_4576 , __A="gelu" , __A=0.25 , __A=1_0000 , __A=0.0 , __A=0.0 , __A=0.1 , __A=2048 , __A=0.02 , __A=1E-5 , __A=True , __A=0 , __A=2 , __A=False , __A=True , __A=None , **__A , ) -> str:
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =rotary_pct
_lowerCAmelCase =rotary_emb_base
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =hidden_dropout
_lowerCAmelCase =classifier_dropout
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =use_cache
_lowerCAmelCase =tie_word_embeddings
_lowerCAmelCase =use_parallel_residual
_lowerCAmelCase =rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )
def UpperCamelCase__ ( self ) -> int:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __A ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F'''got {self.rope_scaling}''' )
_lowerCAmelCase =self.rope_scaling.get('type' , __A )
_lowerCAmelCase =self.rope_scaling.get('factor' , __A )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(__A , __A ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
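# Sketch of a rope_scaling dict that passes the validation above (assuming the
# class maps to the public GPTNeoXConfig):
#
#   config = GPTNeoXConfig(rope_scaling={'type': 'linear', 'factor': 2.0})
#
# A missing key, a type outside {'linear', 'dynamic'}, or a factor <= 1.0 hits
# one of the ValueErrors raised in the validation method.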
| 58
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=__A , default='./' , help='path to save the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
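# Example invocation (file names are illustrative; assumes the command is
# registered under `transformers-cli train`, as add_parser('train', ...) above
# suggests):
#
#   transformers-cli train \
#       --train_data train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --output ./trained_model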
| 58
| 1
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A = None , __A = None , __A=None , __A=None ) -> Tuple:
if not conversation_id:
_lowerCAmelCase =uuid.uuida()
if past_user_inputs is None:
_lowerCAmelCase =[]
if generated_responses is None:
_lowerCAmelCase =[]
_lowerCAmelCase =conversation_id
_lowerCAmelCase =past_user_inputs
_lowerCAmelCase =generated_responses
_lowerCAmelCase =text
def __eq__( self , __A ) -> int:
if not isinstance(__A , __A ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCamelCase__ ( self , __A , __A = False ) -> Any:
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
_lowerCAmelCase =text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
_lowerCAmelCase =text
def UpperCamelCase__ ( self ) -> List[str]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
_lowerCAmelCase =None
def UpperCamelCase__ ( self , __A ) -> Optional[Any]:
self.generated_responses.append(__A )
def UpperCamelCase__ ( self ) -> Optional[int]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ) -> Dict:
_lowerCAmelCase =F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
_lowerCAmelCase ='user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
__lowercase , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , *__A , **__A ) -> Tuple:
super().__init__(*__A , **__A )
if self.tokenizer.pad_token_id is None:
_lowerCAmelCase =self.tokenizer.eos_token
def UpperCamelCase__ ( self , __A=None , __A=None , __A=None , **__A ) -> Optional[Any]:
_lowerCAmelCase ={}
_lowerCAmelCase ={}
_lowerCAmelCase ={}
if min_length_for_response is not None:
_lowerCAmelCase =min_length_for_response
if minimum_tokens is not None:
_lowerCAmelCase =minimum_tokens
if "max_length" in generate_kwargs:
_lowerCAmelCase =generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__A )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __A , __A=0 , **__A ) -> List[str]:
_lowerCAmelCase =super().__call__(__A , num_workers=__A , **__A )
if isinstance(__A , __A ) and len(__A ) == 1:
return outputs[0]
return outputs
def UpperCamelCase__ ( self , __A , __A=32 ) -> Dict[str, Any]:
if not isinstance(__A , __A ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
                F'''Conversation with UUID {conversation.uuid} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
_lowerCAmelCase =self.tokenizer._build_conversation_input_ids(__A )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_lowerCAmelCase =self._legacy_parse_and_tokenize(__A )
if self.framework == "pt":
_lowerCAmelCase =torch.LongTensor([input_ids] )
elif self.framework == "tf":
_lowerCAmelCase =tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCamelCase__ ( self , __A , __A=10 , **__A ) -> int:
_lowerCAmelCase =generate_kwargs.get('max_length' , self.model.config.max_length )
_lowerCAmelCase =model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
_lowerCAmelCase =max_length - minimum_tokens
_lowerCAmelCase =model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
_lowerCAmelCase =model_inputs['attention_mask'][:, -trim:]
_lowerCAmelCase =model_inputs.pop('conversation' )
_lowerCAmelCase =max_length
_lowerCAmelCase =self.model.generate(**__A , **__A )
if self.model.config.is_encoder_decoder:
_lowerCAmelCase =1
else:
_lowerCAmelCase =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCamelCase__ ( self , __A , __A=True ) -> Dict:
_lowerCAmelCase =model_outputs['output_ids']
_lowerCAmelCase =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , )
_lowerCAmelCase =model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(__A )
return conversation
def UpperCamelCase__ ( self , __A ) -> Dict:
_lowerCAmelCase =self.tokenizer.eos_token_id
_lowerCAmelCase =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__A , add_special_tokens=__A ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__A , add_special_tokens=__A ) )
if len(__A ) > self.tokenizer.model_max_length:
_lowerCAmelCase =input_ids[-self.tokenizer.model_max_length :]
return input_ids
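# Usage sketch (assuming the classes above mirror the public conversational
# pipeline API):
#
#   from transformers import Conversation, pipeline
#   chatbot = pipeline('conversational', model='microsoft/DialoGPT-medium')
#   conv = Conversation('Hi, how are you?')
#   conv = chatbot(conv)                  # preprocess -> generate -> postprocess
#   print(conv.generated_responses[-1])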
| 58
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Optional[Any]:
_lowerCAmelCase =()
for resnet, attn in zip(self.resnets , self.attentions ):
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A=True ) -> Optional[int]:
_lowerCAmelCase =()
for resnet in self.resnets:
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A , __A=True ) -> Any:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Any:
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> int:
# there is always at least one resnet
_lowerCAmelCase =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_lowerCAmelCase =[]
for _ in range(self.num_layers ):
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
def __call__( self , __A , __A , __A , __A=True ) -> int:
_lowerCAmelCase =self.resnets[0](__A , __A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
return hidden_states
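# How the up blocks above consume skip connections: at every resnet the last
# entry of res_hidden_states_tuple is popped and concatenated on the channel
# axis (axis=-1, since Flax uses NHWC layout). Schematic shapes (illustrative):
#
#   hidden: (B, H, W, C_prev)  +  skip: (B, H, W, C_skip)
#   -> concat: (B, H, W, C_prev + C_skip)  -> resnet -> (B, H, W, C_out)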
| 58
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
                _lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # fed into nn.Sequential with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
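# Example invocation (script name and paths are illustrative only):
#
#   python convert_gptsan_tf_to_pt.py --tf_model_dir ./gptsan-tf --output ./gptsan
#
# A missing `.pt` suffix on --output is appended automatically by the check
# near the top of the conversion routine.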
| 58
| 1
|
'''simple docstring'''
import pprint
import requests
lowercase_ = '''https://zenquotes.io/api'''
def UpperCamelCase__ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def UpperCamelCase__ ( ):
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowercase_ = random_quotes()
pprint.pprint(response)
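# Both helpers return the parsed JSON payload; per the public zenquotes.io
# documentation this is typically a list of objects of the form
#   [{'q': '<quote text>', 'a': '<author>', 'h': '<html rendering>'}]
# (field names are the API's own, quoted here as an assumption).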
| 58
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
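# Worked example of the digit-sum loop above: for power = 15, 2**15 == 32768
# and successive iterations peel 8, 6, 7, 2, 3 via n % 10 while n // 10
# shrinks n, giving r == 26.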
| 58
| 1
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = (DPMSolverSDEScheduler,)
lowercase : Dict = 10
def UpperCamelCase__ ( self , **__A ) -> Union[str, Any]:
_lowerCAmelCase ={
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**__A )
return config
def UpperCamelCase__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def UpperCamelCase__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def UpperCamelCase__ ( self ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps , device=__A )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A , use_karras_sigmas=__A )
scheduler.set_timesteps(self.num_inference_steps , device=__A )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for t in scheduler.timesteps:
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
| 58
|
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Return True if a back edge is reachable from ``vertex``."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
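    # Illustrative example (not part of the original module): the directed graph
    # below has a back edge 2 -> 0, so check_cycle should report a cycle.
    example_graph = {0: [1], 1: [2], 2: [0]}
    print(f"{check_cycle(example_graph) = }")  # check_cycle(example_graph) = True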
"""Project Euler problem 10: compute the sum of all primes below two million."""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, else False."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
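# Illustrative check (not from the original module): is_prime(29) is True,
# while is_prime(91) is False because 91 == 7 * 13.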
def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order: 2, 3, 5, 7, 11, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F'{solution() = }')
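    # For reference: with the default limit of 2_000_000 this prints
    # solution() = 142913828922 (the well-known Project Euler #10 answer).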
"""BLIP-2 model configuration."""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder."""

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Querying Transformer (Q-Former)."""

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    """Composite configuration holding the vision, Q-Former, and text sub-configurations."""

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate a Blip2Config from BLIP-2 vision, Q-Former, and language model configurations."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
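# Minimal usage sketch (illustrative, assuming the classes above behave as in
# `transformers`): compose a Blip2Config from default sub-configs and round-trip it.
#
#     vision_config = Blip2VisionConfig()
#     qformer_config = Blip2QFormerConfig()
#     text_config = CONFIG_MAPPING["opt"]()
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config, qformer_config, text_config
#     )
#     assert config.to_dict()["model_type"] == "blip-2"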
"""Tests for the DDIMParallelScheduler."""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
"""Encrypt and decrypt text messages using International Morse code."""
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()