| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace the module object with a lazy proxy: submodules are imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
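
What the `_LazyModule` indirection buys is that importing the package stays cheap and the torch-backed submodule is only loaded when one of its names is first touched. A minimal sketch, assuming this file lives at `transformers/models/jukebox/__init__.py` and torch is installed:

# Hypothetical usage sketch:
from transformers.models.jukebox import JukeboxConfig  # loads only configuration_jukebox

config = JukeboxConfig()  # modeling_jukebox stays unimported until e.g. JukeboxModel is accessed
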
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    # NOTE: `from_gh` is a module-level flag set in the __main__ block below.
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
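
A hypothetical invocation of the script above; the run id and token are placeholders, and `get_ci_error_statistics.py` must sit next to it on the import path:

# Example (placeholder run id and token):
#   python extract_warnings.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_artifacts \
#       --token "$GITHUB_TOKEN" \
#       --targets DeprecationWarning,FutureWarning
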
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
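
Outside the pytest harness, the same writer/reader pair round-trips a dataset in a few lines. A minimal sketch with illustrative column names and values:

# Minimal round-trip sketch (illustrative data):
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c"], "col_2": [1, 2, 3]})
ParquetDatasetWriter(ds, "example.parquet").write()  # returns the number of bytes written
reloaded = ParquetDatasetReader("example.parquet").read()
assert reloaded.column_names == ds.column_names
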
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple constructor that stores the key (default 0)."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR-encrypt `content` char by char; falls back to the constructor key, then 1."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """XOR is symmetric, so decryption is the same operation as encryption."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """Like `encrypt`, but returns a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Like `decrypt`, but returns a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt `file` line by line into `encrypt.out`; returns False on I/O errors."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt `file` line by line into `decrypt.out`; returns False on I/O errors."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if crypt.encrypt_file("test.txt", key):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if crypt.decrypt_file("encrypt.out", key):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Returns the least number at which the proportion of bouncy numbers first reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
from __future__ import annotations


def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return a decimal number as a (numerator, denominator) pair in lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: find the GCD of numerator and denominator to reduce the fraction
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""A platform independent file lock that supports the with-statement."""
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None

# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError

# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A proxy returned by `acquire`, so the lock also works as a context manager."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform dependent. On success, self._lock_file_fd holds the lock file's descriptor."""
        raise NotImplementedError()

    def _release(self):
        """Releases the lock and sets self._lock_file_fd to None."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True if the object holds the file lock."""
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a `Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock once the lock counter reaches zero (or immediately if *force* is True)."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Alias for the lock implementation matching the current platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
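
A minimal usage sketch of the `FileLock` alias above; the lock path and timeout are arbitrary:

# Minimal usage sketch (lock path and timeout are arbitrary):
lock = FileLock("shared_resource.txt.lock", timeout=5)
with lock:
    # critical section: at most one process at a time gets here;
    # a second process blocks up to 5 s, then raises Timeout
    with open("shared_resource.txt", "a") as f:
        f.write("exclusive write\n")
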
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
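
A hypothetical construction of the class above, assuming TensorFlow is installed; `models`, `batch_sizes`, and `sequence_lengths` come from the parent `BenchmarkArguments`, and all values are illustrative:

# Hypothetical usage sketch (field values are illustrative):
args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
    eager_mode=True,  # defined on this class
)
print(args.strategy)  # tf.distribute strategy picked from the available hardware
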
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # inconsistent: euclidean distance floor-divided by the global time counter t
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))

    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
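
# Illustrative sketch (not part of the original conversion flow): a fused attention
# projection stacks query, key and value along dim 0, so a (3 * dim, dim) weight and
# a (3 * dim,) bias split into equal thirds. The helper name below is hypothetical
# and exists only for demonstration.
def _split_fused_qkv(in_proj_weight, in_proj_bias, dim):
    """Split a stacked qkv projection into (weight, bias) pairs for q, k and v."""
    q = (in_proj_weight[:dim, :], in_proj_bias[:dim])
    k = (in_proj_weight[dim : dim * 2, :], in_proj_bias[dim : dim * 2])
    v = (in_proj_weight[-dim:, :], in_proj_bias[-dim:])
    return q, k, v
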
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
        __lowercase = in_proj_bias[:hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, """rb""" ) as f:
        data = pickle.load(f)
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="""pt""" )
    outputs = model(**inputs)
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}')
        image_processor.push_to_hub(F'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
def solution(limit=28_123):
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
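
# Quick cross-check (illustrative only, not part of the original solution): the
# sieve above should agree with a naive proper-divisor sum. `_proper_divisor_sum`
# is a hypothetical helper added purely for demonstration.
def _proper_divisor_sum(n):
    """Naive O(sqrt(n)) sum of the proper divisors of n."""
    if n < 2:
        return 0
    total = 1  # 1 divides every n > 1
    for d in range(2, int(n**0.5) + 1):
        if n % d == 0:
            total += d
            if d != n // d:
                total += n // d
    return total
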
if __name__ == "__main__":
print(solution())
from math import sqrt
def is_prime(number):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
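
# Minimal sanity check of the trial-division helper above (illustrative only):
# the primes below 30 are exactly 2, 3, and the numbers of the form 6k +/- 1
# that survive trial division.
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
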
def solution(nth=10_001):
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
def hexagonal_numbers(length):
    '''simple docstring'''
    if length <= 0 or not isinstance(length, int):
        raise ValueError("""Length must be a positive integer.""" )
    return [n * (2 * n - 1) for n in range(length)]
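
# Sanity check (illustrative only): the closed form n * (2n - 1) yields
# 0, 1, 6, 15, 28 for n = 0..4.
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
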
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
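
# Illustrative behaviour (not part of the test suite itself): iterables pass
# through unchanged, scalars are duplicated into a pair, e.g. an image size of
# 224 becomes (224, 224).
assert to_atuple(224) == (224, 224)
assert to_atuple((224, 160)) == (224, 160)
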
@require_tf
class TFVisionTextDualEncoderMixin:
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
    def check_model_from_pretrained_configs(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_model(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1e-5 )
    def check_vision_text_output_attention(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """simple docstring"""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
    def test_vision_text_dual_encoder_model(self):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)
    def test_model_from_pretrained_configs(self):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained(self):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load(self):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention(self):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@slow
    def test_real_model_save_load_from_pretrained(self):
        """simple docstring"""
        model_a, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs)
        out_1 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname)
            model_b = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_b(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFViTModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def check_vision_text_output_attention(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        """simple docstring"""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFDeiTModel(vision_config , name="""vision_model""" )
        text_model = TFRobertaModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        """simple docstring"""
        vision_model = TFCLIPVisionModel(vision_config , name="""vision_model""" )
        text_model = TFBertModel(text_config , name="""text_model""" )
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_inference(self):
        """simple docstring"""
        model = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=True )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_284_727, 0.3_104_122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1e-3 ) )
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    '''simple docstring'''
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to this'
            """ function.""" )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode="""max""" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F'val_{metric}' , mode="""min""" if """loss""" in metric else """max""" , patience=patience , verbose=True , )
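
# Illustrative wiring (hypothetical helper, assuming a standard pytorch_lightning
# Trainer): how a training script would typically bundle the two factories above.
def _build_default_callbacks(output_dir, metric, patience):
    """Bundle checkpointing and early stopping for a given validation metric."""
    return [
        get_checkpoint_callback(output_dir, metric),
        get_early_stopping_callback(metric, patience),
    ]
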
class __UpperCamelCase ( pl.Callback ):
    def on_batch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        """simple docstring"""
        lrs = {F'lr_group_{i}': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
"""simple docstring"""
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
        od = Path(pl_module.hparams.output_dir )
if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , """a+""" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(content )
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , """test""" )
@rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function(self, t: float) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values
    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve(self, step_size: float = 0.01) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
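
# Illustrative check (not part of the original demo): the Bernstein basis weights
# always sum to 1, and for a degree-2 curve at t = 0.5 they are [0.25, 0.5, 0.25].
# The control points below are a hypothetical example.
_demo_curve = BezierCurve([(0, 0), (1, 1), (2, 0)])
assert _demo_curve.basis_function(0.5) == [0.25, 0.5, 0.25]
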
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        """simple docstring"""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , ) -> str:
"""simple docstring"""
__lowercase = True
__lowercase = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
__lowercase = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
__lowercase = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
__lowercase = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
__lowercase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = True
__lowercase = True
__lowercase = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
__lowercase = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
__lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )["""hidden_states"""][0]
__lowercase = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["""hidden_states"""][0]
# select random slice
__lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
__snake_case :Union[str, Any] = False
__snake_case :Optional[int] = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
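
# Illustrative sketch of what linear RoPE scaling does (an assumption about the
# mechanics, not the model's actual code): positions are divided by the scaling
# factor before the rotary angles are computed, stretching the usable context.
# The helper below is hypothetical and exists only for demonstration.
def _rope_angle(position, dim_index, head_dim, scaling_factor=1.0):
    """Rotary angle for one (position, dimension) pair with linear scaling."""
    inv_freq = 1.0 / (10_000 ** (2 * dim_index / head_dim))
    return (position / scaling_factor) * inv_freq
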
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def test_model_common_attributes(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self):
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model(self):
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_logits , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
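# For reference, a tiny self-contained illustration (values assumed) of the
# np.allclose tolerance used above: atol=1e-4 absorbs float32 kernel noise in
# the first few logits while still catching real regressions.
import numpy as np
assert np.allclose([0.1000, 0.2000] , [0.10005, 0.19996] , atol=1e-4 )
assert not np.allclose([0.1000, 0.2000] , [0.1010, 0.2000] , atol=1e-4 )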
| 53
| 0
|
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = """naver-clova-ix/donut-base"""
class DonutProcessorTest( unittest.TestCase ):
    def setUp( self ) -> None:
        """simple docstring"""
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self ) -> None:
        """simple docstring"""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
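# Illustration only: a minimal, assumed sketch of how flat <s_key>value</s_key>
# runs map onto a dict. The real DonutProcessor.token2json additionally handles
# nested fields and <sep/>-separated lists, which this toy version does not.
import re
def toy_token2json(sequence ):
    # collect every top-level <s_key>value</s_key> pair
    return {m.group(1 ): m.group(2 ) for m in re.finditer(r"<s_(\w+)>([^<]*)</s_\1>" , sequence )}
assert toy_token2json("<s_name>John Doe</s_name><s_age>99</s_age>" ) == {"name": "John Doe", "age": "99"}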
| 706
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    # A fairseq-style mapping from symbols to consecutive integers.
    def __init__( self , *, # begin keyword-only arguments
    bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ) -> None:
        """simple docstring"""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ) -> bool:
        """simple docstring"""
        return self.indices == other.indices
    def __getitem__( self , idx ) -> str:
        """simple docstring"""
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self.symbols )
    def __contains__( self , sym ) -> bool:
        """simple docstring"""
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        """simple docstring"""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ) -> int:
        """simple docstring"""
        return 0
    def add_from_file( self , f ) -> None:
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(""" """ , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(""" """ , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def rewrite_dict_keys( d ):
    '''simple docstring'''
    # strip the BPE continuation marker "@@" and append "</w>" to whole words
    da = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = d[k]  # restore
    return da
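# A quick illustration of `rewrite_dict_keys` on a toy fairseq vocab (input
# assumed for demonstration): "@@" continuation markers are dropped, whole
# words gain a "</w>" suffix, and the four special tokens are kept verbatim.
_toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "bio@@": 4, "gpt": 5}
assert rewrite_dict_keys(_toy_vocab ) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "bio": 4, "gpt</w>": 5}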
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(F'Writing results to {pytorch_dump_folder_path}' )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , """checkpoint.pt""" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file , map_location="""cpu""" )
    args = chkpt["""cfg"""]["""model"""]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , """dict.txt""" )
    if not os.path.isfile(dict_file ):
        raise ValueError(F'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""vocab_file"""] )
    print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(src_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , """bpecodes""" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""merges_file"""] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , """config.json""" )
    model_conf = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1e-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }
    # good hparam defaults to start with
    print(F'Generating {biogpt_model_config_file}' )
    with open(biogpt_model_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1024,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }
    print(F'Generating {biogpt_tokenizer_config_file}' )
    with open(biogpt_tokenizer_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model_state_dict = chkpt["""model"""]
    # remove unneeded keys
    ignore_keys = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight""" ):
            model_state_dict[layer_name.replace("""decoder.""" , """""" )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("""decoder""" , """biogpt""" )] = model_state_dict.pop(layer_name )
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(F'Generating {pytorch_weights_dump_path}' )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print("""Conversion is done!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
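# Example invocation (paths assumed for illustration); the checkpoint directory
# is expected to contain checkpoint.pt, dict.txt and bpecodes:
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path ./biogpt-fairseq-checkpoint \
#       --pytorch_dump_folder_path ./biogpt-hf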
| 53
| 0
|
def z_function( input_str ):
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next( i , z_result , s ):
    '''simple docstring'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern( pattern , input_str ):
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
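# Worked example: for s = "abacaba" the Z-array is [0, 0, 1, 0, 3, 0, 1];
# z[4] = 3 because s[4:7] = "aba" matches the prefix of s.
assert z_function("""abacaba""" ) == [0, 0, 1, 0, 3, 0, 1]
# "aba" occurs twice in "abacaba" (at indices 0 and 4):
assert find_pattern("""aba""" , """abacaba""" ) == 2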
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        """simple docstring"""
        generator = np.random.RandomState(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_pipeline_default_ddpm( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_pndm( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_lms( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler_ancestral( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_dpm_multistep( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_prompt_embeds( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("""prompt""" )]
        text_inputs = pipe.tokenizer(
            prompt , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="""np""" , )
        text_inputs = text_inputs["""input_ids"""]
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("""prompt""" )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=True , return_tensors="""np""" , )
            text_inputs = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs["""prompt_embeds"""] , inputs["""negative_prompt_embeds"""] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ) -> None:
        """simple docstring"""
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """A painting of a squirrel eating a burger"""
        np.random.seed(0 )
        output = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_ddim( self ) -> None:
        """simple docstring"""
        ddim_scheduler = DDIMScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=ddim_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """open neural network exchange"""
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms( self ) -> None:
        """simple docstring"""
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = """open neural network exchange"""
        generator = np.random.RandomState(0 )
        output = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_intermediate_state( self ) -> None:
        """simple docstring"""
        number_of_steps = 0
        def test_callback_fn(step: int , timestep: int , latents: np.ndarray ) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """Andromeda galaxy in a bottle"""
        generator = np.random.RandomState(0 )
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker( self ) -> None:
        """simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline )
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname )
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
        assert image is not None
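# Provider selection sketch (file name assumed): onnxruntime tries providers in
# order, so listing CPU after CUDA gives a graceful fallback.
#
#   providers = [("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000"}), "CPUExecutionProvider"]
#   sess = ort.InferenceSession("model.onnx", sess_options=ort.SessionOptions(), providers=providers)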
| 53
| 0
|
from __future__ import annotations
from math import pi
def ind_reactance( inductance , frequency , reactance ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
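# Worked example (values assumed): a 35 mH inductor at 1 kHz gives
# X_L = 2 * pi * f * L = 2 * pi * 1000 * 0.035 ≈ 219.91 ohms.
assert abs(ind_reactance(0.035 , 1000 , 0 )["""reactance"""] - 219.9115 ) < 1e-3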
| 708
|
def remove_duplicates( key ):
    '''simple docstring'''
    key_no_dups = """"""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key ):
    '''simple docstring'''
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message , cipher_map ):
    '''simple docstring'''
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message , cipher_map ):
    '''simple docstring'''
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    '''simple docstring'''
    message = input("""Enter message to encode or decode: """ ).strip()
    key = input("""Enter keyword: """ ).strip()
    option = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
    try:
        func = {"""e""": encipher, """d""": decipher}[option]
    except KeyError:
        raise KeyError("""invalid input option""" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
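# Round-trip sanity check (keyword assumed for illustration): enciphering and
# then deciphering with the same map returns the upper-cased message.
_demo_map = create_cipher_map("""marvin""" )
assert decipher(encipher("""hello world""" , _demo_map ) , _demo_map ) == """HELLO WORLD"""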
| 53
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import gcd
def pollard_rho( num , seed = 2 , step = 1 , attempts = 3 , ):
'''simple docstring'''
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
__UpperCamelCase : Dict = parser.parse_args()
__UpperCamelCase : List[Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
__UpperCamelCase : Tuple = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
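# Worked example (using the default seed and step above): pollard_rho(187)
# finds a nontrivial factor of 187 = 11 * 17 on the first attempt.
assert pollard_rho(187 ) in (11, 17)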
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__UpperCamelCase : Optional[Any] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
__UpperCamelCase : Any = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
FAIRSEQ_LANGUAGE_CODES = __UpperCamelCase  # give the language-code list above its conventional name
class NllbTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """eng_Latn"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        """simple docstring"""
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        """simple docstring"""
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
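# Usage sketch (checkpoint assumed): with the default non-legacy layout the
# encoded source sequence is [src_lang_code, ...tokens..., eos], so the
# language code is the very first input id.
#
#   tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert ids[0] == tokenizer.convert_tokens_to_ids("eng_Latn")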
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 1000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ) -> None:
        """simple docstring"""
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ) -> None:
        """simple docstring"""
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ) -> None:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ) -> None:
        """simple docstring"""
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ) -> None:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ) -> None:
        """simple docstring"""
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
    def test_variance_learned_range( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""learned_range""" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_010_011 < 1e-5
    def test_full_loop( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ) -> None:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
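# The two full-loop tests above follow the standard diffusers denoising
# pattern; as a standalone sketch (scheduler, model and sample assumed to
# already exist):
#
#   for i, t in enumerate(scheduler.timesteps):
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample, generator=generator).prev_sample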
| 53
| 0
|
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
REVISION_ID_DEFAULT = """main"""
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = """aaaaaaa"""
# This commit does not exist, so we should 404.
PINNED_SHA1 = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en( ):
    '''simple docstring'''
    print("""Welcome!""" )
    yield
    print("""Bye!""" )
@contextlib.contextmanager
def context_fr( ):
    '''simple docstring'''
    print("""Bonjour!""" )
    yield
    print("""Au revoir!""" )
class TestImportMechanisms( unittest.TestCase ):
    def test_module_spec_available( self ) -> None:
        """simple docstring"""
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""" ) is not None
class GenericUtilTests( unittest.TestCase ):
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ) -> None:
        """simple docstring"""
        with ContextManagers([] ):
            print("""Transformers are awesome!""" )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ) -> None:
        """simple docstring"""
        with ContextManagers([context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ) -> None:
        """simple docstring"""
        with ContextManagers([context_fr(), context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def test_find_labels_pt( self ) -> None:
        """simple docstring"""
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
        class DummyModel( BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
    @require_tf
    def test_find_labels_tf( self ) -> None:
        """simple docstring"""
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
        class DummyModel( TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
    @require_flax
    def test_find_labels_flax( self ) -> None:
        """simple docstring"""
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel( FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
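# For reference, `ContextManagers` behaves like an ExitStack entering a list of
# context managers in order; a minimal stand-in (assumed equivalent, not the
# library's actual implementation):
from contextlib import ExitStack
class _ToyContextManagers:
    def __init__(self , managers ):
        self.managers = managers
        self.stack = ExitStack()
    def __enter__(self ):
        for manager in self.managers:
            self.stack.enter_context(manager )
    def __exit__(self , *args ):
        self.stack.__exit__(*args )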
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    def __post_init__( self ) -> None:
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split( Enum ):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset( Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ) -> None:
        """simple docstring"""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1] , label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
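
# Illustrative usage sketch (not part of the original file; the data directory is
# hypothetical, `bert-base-uncased` is a standard public checkpoint):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
#     train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
#     print(len(train_dataset), train_dataset.get_labels())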
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check whether the 3-digit substrings of ``num`` satisfy Project Euler 43's
    divisibility properties (by 2, 3, 5, 7, 11, 13 and 17 respectively)."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the sub-string divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
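
# Worked check (not in the original): 1406357289 is the canonical example of a
# 0-9 pandigital number with this sub-string divisibility property, so the
# predicate should accept it:
#
#     assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))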
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
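
# Example invocation (hypothetical file names; the flag names are derived by
# HfArgumentParser from the dataclass fields above):
#
#     python run_language_modeling.py \
#         --model_name_or_path bert-base-uncased \
#         --train_data_file train.txt --eval_data_file eval.txt \
#         --mlm --do_train --do_eval --output_dir ./lm_output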
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the longest side is strictly shorter than the sum of the
    remaining sides, i.e. the side lengths can form a closed polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
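
# Quick examples (not in the original): a (3, 4, 5) triangle closes, while
# (1, 1, 3) does not, since 3 is not strictly less than 1 + 1:
#
#     assert check_polygon([3, 4, 5]) is True
#     assert check_polygon([1, 1, 3]) is False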
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
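
# Illustrative sketch (not part of the original file): a tiny config from scratch.
# With the defaults above, `alibi` is False, so rotary embeddings are used, and
# head_dim is hidden_size // num_attention_heads:
#
#     config = FalconConfig(hidden_size=256, num_attention_heads=4, num_hidden_layers=2)
#     assert config.head_dim == 64 and config.rotary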
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of ``nums`` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
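
# Worked example (not in the original): for [1, 2, 4, 7] the best non-adjacent
# picks are 2 + 7 = 9 (1 + 4 = 5 and 1 + 7 = 8 are worse):
#
#     assert maximum_non_adjacent_sum([1, 2, 4, 7]) == 9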
import warnings
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
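
# Example invocation of the script above (script file name hypothetical; the
# checkpoint repo comes from the help text, the output path is illustrative):
#
#     python convert_roberta_prelayernorm_checkpoint.py \
#         --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#         --pytorch_dump_folder_path ./roberta_prelayernorm_converted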
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.Array] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
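
# Illustrative usage sketch (not part of the original file; shapes and step count
# are arbitrary). In the DDPM paper's notation, `step` above samples
#     x_{t-1} ~ N(mu_tilde_t, beta_tilde_t), with
#     beta_tilde_t = ((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * beta_t,
# which matches `_get_variance` and the two coefficients computed in `step`:
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     # per denoising step: out = scheduler.step(state, model_output, t, sample, key)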
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
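
# The kernel above implements the standard Gabor function
#     g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel coordinates rotated by theta. A quick sanity check
# (not in the original): with psi = 0 the kernel peaks at its center.
#
#     k = gabor_filter_kernel(21, 8, 45, 10, 0, 0)
#     assert k.max() == k[21 // 2, 21 // 2]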
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
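
# Expected file format (illustrative, not from the original): one token per line,
# with ids assigned by line order, e.g.
#
#     <cls>
#     <pad>
#     <eos>
#     <unk>
#     L
#     A
#     ...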
class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False):
        return super()._add_tokens(new_tokens, special_tokens=True)
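
# Illustrative usage sketch (not part of the original file; the vocab path is
# hypothetical). Protein sequences are split into per-residue tokens via the
# token trie built in __init__, then wrapped as <cls> ... <eos> by
# build_inputs_with_special_tokens above:
#
#     tokenizer = EsmTokenizer(vocab_file="vocab.txt")
#     enc = tokenizer("MKTAYIAK")
#     print(enc["input_ids"])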
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(key, value, full_name, weight_type, hf_pointer):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(mapped_key, value, name, weight_type, hf_model)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : List[str] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : str = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(__a , tokenizer_name + """Fast""" )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(__a , force_download=__a )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split("""/""" )
__lowercase = os.path.join(__a , __a )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(__a )[-1][0]
if next_char == "/":
__lowercase = os.path.join(__a , __a )
__lowercase = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
__lowercase = tokenizer.save_pretrained(
__a , legacy_format=__a , filename_prefix=__a )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith("""tokenizer.json""" ):
os.remove(__a )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
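# Illustrative check: 409 is bouncy (sorted digits "049" != "409" and reversed "940" != "409"),
# while 1234 (increasing) and 4321 (decreasing) are not.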
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values strictly between 0 and 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
from __future__ import annotations
class __UpperCamelCase :
def __init__( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = text, pattern
__lowercase , __lowercase = len(snake_case_ ), len(snake_case_ )
def _a ( self : List[str] , _lowerCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
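# Scan the current alignment right-to-left and return the text index of the first mismatching
# character, or -1 if the pattern matches completely at current_pos.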
def _a ( self : Any , _lowerCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = []
for i in range(self.textLen - self.patLen + 1 ):
__lowercase = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
__lowercase = self.match_in_pattern(self.text[mismatch_index] )
__lowercase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCamelCase : List[Any] = """ABAABA"""
__UpperCamelCase : str = """AB"""
__UpperCamelCase : List[Any] = BoyerMooreSearch(text, pattern)
__UpperCamelCase : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
__lowercase = list(poly_a or [0] )[:]
__lowercase = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
__lowercase = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
__lowercase = len(self.polyB )
# Add 0 to make lengths equal a power of 2
__lowercase = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root of unity used for the Fourier transform
__lowercase = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
__lowercase = self.__multiply()
def _a ( self : Union[str, Any] , _lowerCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(UpperCAmelCase__ ) <= 1:
return dft[0]
# Cooley-Tukey butterfly: combine column pairs with powers of the root, halving the column count each pass
__lowercase = self.c_max_length // 2
while next_ncol > 0:
__lowercase = [[] for i in range(UpperCAmelCase__ )]
__lowercase = self.root**next_ncol
# First half of next step
__lowercase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
__lowercase = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(UpperCAmelCase__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
__lowercase = new_dft
__lowercase = next_ncol // 2
return dft[0]
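# Multiply via the convolution theorem: take the two DFTs, multiply them pointwise, and run
# the inverse DFT below to recover the coefficients of the product polynomial.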
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.__dft("""A""" )
__lowercase = self.__dft("""B""" )
__lowercase = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
__lowercase = 2
while next_ncol <= self.c_max_length:
__lowercase = [[] for i in range(UpperCAmelCase__ )]
__lowercase = self.root ** (next_ncol // 2)
__lowercase = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
__lowercase = new_inverse_c
next_ncol *= 2
# Unpack
__lowercase = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = """A = """ + """ + """.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A] ) )
__lowercase = """B = """ + """ + """.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B] ) )
__lowercase = """A*B = """ + """ + """.join(
F'{coef}*x^{i}' for i, coef in enumerate(self.product ) )
return F'{a}\n{b}\n{c}'
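# Illustrative example: with poly_a=[1, 2, 3] and poly_b=[1, 1], the product encodes
# (1 + 2x + 3x^2) * (1 + x) = 1 + 3x + 5x^2 + 3x^3 (entries come back as rounded complex numbers).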
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring'''
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
__lowercase = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
__lowercase = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
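# Worked example (illustrative): principal=25_000, rate_per_annum=0.12, years_to_repay=3
# -> monthly rate 0.01 over 36 payments, so the EMI is 25_000 * 0.01 * 1.01**36 / (1.01**36 - 1),
# roughly 830.36 per month.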
if __name__ == "__main__":
import doctest
doctest.testmod()
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
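# Three heuristics drive the search: index 0 is the consistent Euclidean distance, while the
# scaled Euclidean and Manhattan variants below are the potentially inadmissible ones.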
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
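# Weighted priority used by every open list: key(s, i) = g(s) + Wa * h_i(s, goal).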
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
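# Expanding a state: remove it from all open lists, then relax its 4-connected neighbours;
# each improved neighbour is pushed to the anchor queue (index 0) and to the inadmissible
# queues whose key stays within Wa times the anchor key.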
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __UpperCamelCase :
def __init__( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : Any=13 , _lowerCAmelCase : List[Any]=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : str=99 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : int=32 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : List[Any]=4 , _lowerCAmelCase : Any=37 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Any=None , ) -> List[str]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def _a ( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = MegatronBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
__lowercase = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
__lowercase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = MegatronBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = MegatronBertForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = MegatronBertForNextSentencePrediction(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = MegatronBertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , next_sentence_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase = MegatronBertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MegatronBertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = MegatronBertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__snake_case :int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case :Optional[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case :Union[str, Any] = True
# test_resize_embeddings = False
__snake_case :Optional[int] = False
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any=False ) -> Optional[Any]:
"""simple docstring"""
__lowercase = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
__lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = MegatronBertModelTester(self )
__lowercase = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowerCamelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowerCamelCase )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowerCamelCase )
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowerCamelCase )
def _a ( self : str ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowerCamelCase )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowerCamelCase )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowerCamelCase )
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowerCamelCase )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return torch.tensor(
lowerCamelCase_ , dtype=torch.long , device=lowerCamelCase_ , )
__UpperCamelCase : str = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip("""Model is not available.""" )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__lowercase = os.path.join(os.environ["""MYDIR"""] , __lowerCamelCase )
__lowercase = MegatronBertModel.from_pretrained(__lowerCamelCase )
model.to(__lowerCamelCase )
model.half()
__lowercase = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
__lowercase = model(__lowerCamelCase )[0]
__lowercase = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __lowerCamelCase )
__lowercase = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
__lowercase = output[0, ii, jj]
__lowercase = expected[3 * ii + jj]
__lowercase = '''ii={} jj={} a={} b={}'''.format(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertTrue(math.isclose(__lowerCamelCase , __lowerCamelCase , rel_tol=__lowerCamelCase , abs_tol=__lowerCamelCase ) , msg=__lowerCamelCase )
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
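# Build (original_key, hf_key) pairs mapping the MaskFormer/Swin checkpoint names onto the
# HF module hierarchy: backbone stem and stages, FPN decoder, transformer decoder, and heads.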
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
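# The original checkpoints store query/key/value as one fused qkv matrix per attention layer;
# the helpers below slice it into the separate q, k and v projections the HF modules expect.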
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
# fmt: off
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[: hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[: hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size_divisor=32 , do_rescale=True ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self ):
        """simple docstring"""
        self.image_processor_tester = GLPNImageProcessingTester(self )
@property
    def image_processor_dict(self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size_divisor""" ) )
        self.assertTrue(hasattr(image_processing , """resample""" ) )
        self.assertTrue(hasattr(image_processing , """do_rescale""" ) )
    def test_batch_feature(self ):
"""simple docstring"""
pass
    def test_call_pil(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
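    # Note: GLPNImageProcessor resizes each image so that both spatial dimensions
    # become multiples of size_divisor (32 in this tester), which is exactly what
    # the two modulo assertions in these tests verify.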
    def test_call_numpy(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch(self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
from math import sqrt
def is_prime(number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
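    # Why: any integer can be written as 6k + r with r in {0, ..., 5}; 6k, 6k + 2
    # and 6k + 4 are even, and 6k + 3 is divisible by 3, so every prime above 3 is
    # 6k + 1 or 6k + 5 (= 6k - 1). The loop below therefore only tests i and i + 2
    # for i = 5, 11, 17, ...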
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth = 10_001 ):
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0e4 , flip_sin_to_cos = False , scale = 1.0 , ):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, F'Embedding dimension {embedding_dim} should be even'
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
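# Usage sketch (hypothetical values, not executed on import): for three timesteps
# and an even embedding dimension, the call below would return a (3, 32) array of
# concatenated sin/cos features.
# >>> emb = get_sinusoidal_embeddings(jnp.array([0, 10, 100]), embedding_dim=32)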
class FlaxTimestepEmbedding(nn.Module ):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
    @nn.compact
    def __call__(self , temb ):
        """simple docstring"""
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(temb )
        return temb
class FlaxTimesteps(nn.Module ):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
    @nn.compact
    def __call__(self , timesteps ):
        """simple docstring"""
        return get_sinusoidal_embeddings(
            timesteps , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x ):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
return x
return (x, x)
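# e.g. to_atuple(224) returns (224, 224), while to_atuple((224, 196)) is already
# iterable and is returned unchanged.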
@require_tf
class TFVisionTextDualEncoderMixin:
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
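        # e.g. a hypothetical 224x224 image with 16x16 patches yields
        # (224 // 16) * (224 // 16) = 196 patches, so seq_len = 196 + 1 = 197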
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals(self , a , b , tol ):
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def get_yolos_config(yolos_name ):
    '''simple docstring'''
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = """huggingface/label-files"""
    filename = """coco-detection-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict , config , base_model = False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[F'encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F'encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def rename_key(name ):
    '''simple docstring'''
    if "backbone" in name:
        name = name.replace("""backbone""" , """vit""" )
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """embeddings.cls_token""" )
    if "det_token" in name:
        name = name.replace("""det_token""" , """embeddings.detection_tokens""" )
    if "mid_pos_embed" in name:
        name = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "class_embed" in name:
        name = name.replace("""class_embed""" , """class_labels_classifier""" )
    if "bbox_embed" in name:
        name = name.replace("""bbox_embed""" , """bbox_predictor""" )
    if "vit.norm" in name:
        name = name.replace("""vit.norm""" , """vit.layernorm""" )
    return name
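# Illustration (hypothetical key): "blocks.0.mlp.fc1.weight" is rewritten step by
# step by the replacements above to "encoder.layer.0.intermediate.dense.weight".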
def convert_state_dict(orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'vit.encoder.layer.{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != """yolos_ti""" else 512
    image_processor = YolosImageProcessor(format="""coco_detection""" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits , pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits , expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(F'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """yolos_ti""": """yolos-tiny""",
            """yolos_s_200_pre""": """yolos-small""",
            """yolos_s_300_pre""": """yolos-small-300""",
            """yolos_s_dWr""": """yolos-small-dwr""",
            """yolos_base""": """yolos-base""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="""hustvl""" )
        model.push_to_hub(model_name , organization="""hustvl""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : Any = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self , list_of_points ):
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function(self , t ):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function(self , t ):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
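    # Sketch: for the degree-1 curve BezierCurve([(1, 2), (3, 5)]),
    # basis_function(0.5) is [0.5, 0.5], so bezier_curve_function(0.5)
    # returns the midpoint (2.0, 3.5).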
    def plot_curve(self , step_size = 0.01 ):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name , arrival_time , burst_time , no_of_process ):
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
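# Worked example of the response ratio used above: a ready process with burst
# time 3 that has already waited 6 time units has ratio (3 + 6) / 3 = 3.0;
# HRRN always schedules the ready process with the highest such ratio.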
def calculate_waiting_time(process_name , turn_around_time , burst_time , no_of_process ):
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
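# e.g. a turn-around time of 7 for a process with burst time 3 means it waited
# 7 - 3 = 4 time units before running.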
if __name__ == "__main__":
__UpperCamelCase : Any = 5
__UpperCamelCase : List[str] = ['A', 'B', 'C', 'D', 'E']
__UpperCamelCase : str = [1, 2, 3, 4, 5]
__UpperCamelCase : Optional[int] = [1, 2, 3, 4, 5]
__UpperCamelCase : Dict = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__UpperCamelCase : str = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = TFEfficientFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__UpperCamelCase : List[str] = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__UpperCamelCase : List[str] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path , enable_fusion=False ):
    '''simple docstring'''
    model , model_cfg = create_model(
        """HTSAT-tiny""" , """roberta""" , checkpoint_path , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=enable_fusion , fusion_type="""aff_2d""" if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r""".*sequential.(\d+).*"""
    text_projection_pattern = r""".*_projection.(\d+).*"""
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(F'sequential.{sequential_layer}.' , F'layers.{int(sequential_layer )//3}.linear.' )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'_projection.{projecton_layer}.' , F'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("""qkv""" , """query""" )] = query_layer
            model_state_dict[key.replace("""qkv""" , """key""" )] = key_layer
            model_state_dict[key.replace("""qkv""" , """value""" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
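# Illustration (hypothetical keys): "text_branch.sequential.3.weight" becomes
# "text_model.layers.1.linear.weight" (since 3 // 3 == 1), and
# "text_projection.0.weight" becomes "text_projection.linear1.weight".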
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    '''simple docstring'''
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
__UpperCamelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class Dictionary:
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="""<s>""",
        pad="""<pad>""",
        eos="""</s>""",
        unk="""<unk>""",
        extra_special_symbols=None,
    ):
        """simple docstring"""
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__(self , other ):
"""simple docstring"""
return self.indices == other.indices
    def __getitem__(self , idx ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
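    # Rewrites fairseq BPE vocab keys for the HF tokenizer: strip the "@@" continuation
    # suffix from word-internal tokens, append "</w>" to word-final tokens, then restore
    # the special tokens that must keep their plain form.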
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 53
| 0
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCamelCase ( A_ ):
__snake_case :Tuple = '''umt5'''
__snake_case :List[Any] = ['''past_key_values''']
def __init__( self : Any , _lowerCAmelCase : Optional[Any]=25_0112 , _lowerCAmelCase : Optional[int]=512 , _lowerCAmelCase : int=64 , _lowerCAmelCase : str=1024 , _lowerCAmelCase : List[Any]=8 , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Dict=32 , _lowerCAmelCase : Union[str, Any]=128 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : Optional[Any]=1e-6 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : Optional[int]="gated-gelu" , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="T5Tokenizer" , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Tuple=0 , **_lowerCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
__lowercase = vocab_size
__lowercase = d_model
__lowercase = d_kv
__lowercase = d_ff
__lowercase = num_layers
__lowercase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__lowercase = num_heads
__lowercase = relative_attention_num_buckets
__lowercase = relative_attention_max_distance
__lowercase = dropout_rate
__lowercase = layer_norm_epsilon
__lowercase = initializer_factor
__lowercase = feed_forward_proj
__lowercase = use_cache
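        # parse the activation spec: e.g. "gated-gelu" -> dense activation "gelu" with
        # gating enabled, while a plain name such as "relu" stays ungated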
__lowercase = self.feed_forward_proj.split("""-""" )
__lowercase = act_info[-1]
__lowercase = act_info[0] == """gated"""
if len(_lowerCAmelCase ) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
if feed_forward_proj == "gated-gelu":
__lowercase = """gelu_new"""
@property
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
return self.d_model
@property
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
return self.num_heads
@property
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return self.num_layers
class __UpperCamelCase ( A_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
__lowercase = """past_encoder_sequence + sequence"""
__lowercase = {0: """batch"""}
__lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowercase = {0: """batch""", 1: """decoder_sequence"""}
__lowercase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return 13
@property
def _a ( self : Tuple ) -> str:
"""simple docstring"""
return 5e-4
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
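        # NOTE: in the upstream diffusers test this assignment is
        # `options.enable_mem_pattern = False`, i.e. it disables ONNX Runtime's
        # memory-pattern optimization for the GPU session.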
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def snake_case ( lowerCamelCase ):
'''simple docstring'''
    if (
        (cp >= 0x4e00 and cp <= 0x9fff)  # CJK Unified Ideographs
        or (cp >= 0x3400 and cp <= 0x4dbf)  # CJK Unified Ideographs Extension A
        or (cp >= 0x20000 and cp <= 0x2a6df)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2a700 and cp <= 0x2b73f)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2b740 and cp <= 0x2b81f)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2b820 and cp <= 0x2ceaf)  # CJK Unified Ideographs Extension E
        or (cp >= 0xf900 and cp <= 0xfaff)  # CJK Compatibility Ideographs
        or (cp >= 0x2f800 and cp <= 0x2fa1f)  # CJK Compatibility Ideographs Supplement
    ):
        return True
return False
def snake_case ( lowerCamelCase ):
'''simple docstring'''
for char in word:
__lowercase = ord(lowerCAmelCase__ )
if not _is_chinese_char(lowerCAmelCase__ ):
return 0
return 1
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
for token in tokens:
__lowercase = len(lowerCAmelCase__ ) > 1 and is_chinese(lowerCAmelCase__ )
if chinese_word:
word_set.add(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
return word_list
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
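    # Greedy longest-match over the BERT tokens: at each Chinese character, try the
    # longest window that equals a whole LTP-segmented word and prefix its non-initial
    # sub-tokens with "##" so whole-word masking can treat the word as one unit.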
if not chinese_word_set:
return bert_tokens
__lowercase = max([len(lowerCAmelCase__ ) for w in chinese_word_set] )
__lowercase = bert_tokens
    __lowercase , __lowercase = 0, len(lowerCAmelCase__ )
while start < end:
__lowercase = True
if is_chinese(bert_word[start] ):
__lowercase = min(end - start , lowerCAmelCase__ )
for i in range(lowerCAmelCase__ , 1 , -1 ):
__lowercase = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__lowercase = '##' + bert_word[j]
__lowercase = start + i
__lowercase = False
break
if single_word:
start += 1
return bert_word
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
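    # Pipeline: segment the lines with LTP in batches of 100 and collect multi-character
    # Chinese words, tokenize the same lines with the BERT tokenizer, then record the
    # positions of "##"-prefixed single Chinese characters as the reference ids.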
__lowercase = []
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
__lowercase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
__lowercase = [get_chinese_word(lowerCAmelCase__ ) for r in res]
ltp_res.extend(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__lowercase = []
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
__lowercase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
__lowercase = []
for input_ids, chinese_word in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowercase = []
for id in input_ids:
__lowercase = bert_tokenizer._convert_id_to_token(lowerCAmelCase__ )
input_tokens.append(lowerCAmelCase__ )
__lowercase = add_sub_symbol(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCAmelCase__ ):
if token[:2] == "##":
__lowercase = token[2:]
# save chinese tokens' pos
if len(lowerCAmelCase__ ) == 1 and _is_chinese_char(ord(lowerCAmelCase__ ) ):
ref_id.append(lowerCAmelCase__ )
ref_ids.append(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
return ref_ids
def snake_case ( lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
__lowercase = f.readlines()
__lowercase = [line.strip() for line in data if len(lowerCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__lowercase = LTP(args.ltp ) # faster in GPU device
__lowercase = BertTokenizer.from_pretrained(args.bert )
__lowercase = prepare_ref(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
__lowercase = [json.dumps(lowerCAmelCase__ ) + '\n' for ref in ref_ids]
f.writelines(lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
__UpperCamelCase : Dict = parser.parse_args()
main(args)
| 708
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
| 0
|
'''simple docstring'''
import functools
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ) or not all(isinstance(lowerCamelCase , lowerCamelCase ) for day in days ):
raise ValueError("""The parameter days should be a list of integers""" )
if len(lowerCamelCase ) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase ) for cost in costs ):
raise ValueError("""The parameter costs should be a list of three integers""" )
if len(lowerCamelCase ) == 0:
return 0
if min(lowerCamelCase ) <= 0:
raise ValueError("""All days elements should be greater than 0""" )
if max(lowerCamelCase ) >= 366:
raise ValueError("""All days elements should be less than 366""" )
__lowercase = set(lowerCamelCase )
@functools.cache
def dynamic_programming(lowerCamelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
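# Example: days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11
# (a 7-day pass bought on day 4 covers days 4-8; 1-day passes cover days 1 and 20).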
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : List[str] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = ["""CLIPFeatureExtractor"""]
__UpperCamelCase : Tuple = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Optional[int] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCamelCase : Any = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase : Any = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase : Optional[int] = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase : List[str] = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
__UpperCamelCase : List[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
__UpperCamelCase : Tuple = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
__UpperCamelCase : int = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCamelCase : Union[str, Any] = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
__UpperCamelCase : List[str] = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __UpperCamelCase ( __UpperCAmelCase ):
__snake_case :Optional[Any] = VOCAB_FILES_NAMES
__snake_case :int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :List[str] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( __UpperCAmelCase ):
__snake_case :Optional[int] = VOCAB_FILES_NAMES
__snake_case :int = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :List[str] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :int = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__UpperCamelCase : Optional[int] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__UpperCamelCase : Any = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__UpperCAmelCase )
class __UpperCamelCase :
def __call__( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Union[bool, str] = False , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[bool] = None , **_lowerCAmelCase : List[str] , ) -> Any:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
elif titles is None or texts is None:
__lowercase = titles if texts is None else texts
return super().__call__(
lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
__lowercase = titles if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [titles]
__lowercase = texts if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [texts]
__lowercase = len(lowerCAmelCase_ )
__lowercase = questions if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else [questions] * n_passages
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
                F'There should be as many titles as texts but got {len(lowerCAmelCase_ )} titles and {len(lowerCAmelCase_ )} texts.' )
__lowercase = super().__call__(lowerCAmelCase_ , lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
__lowercase = super().__call__(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ )["""input_ids"""]
__lowercase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
}
if return_attention_mask is not False:
__lowercase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowercase = attention_mask
return self.pad(lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
def _a ( self : Optional[int] , _lowerCAmelCase : BatchEncoding , _lowerCAmelCase : DPRReaderOutput , _lowerCAmelCase : int = 16 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 4 , ) -> Tuple:
"""simple docstring"""
__lowercase = reader_input["""input_ids"""]
__lowercase , __lowercase , __lowercase = reader_output[:3]
__lowercase = len(lowerCAmelCase_ )
__lowercase = sorted(range(lowerCAmelCase_ ) , reverse=lowerCAmelCase_ , key=relevance_logits.__getitem__ )
__lowercase = []
for doc_id in sorted_docs:
__lowercase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowercase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowercase = sequence_ids.index(self.pad_token_id )
else:
__lowercase = len(lowerCAmelCase_ )
__lowercase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase_ , top_spans=lowerCAmelCase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase_ , start_index=lowerCAmelCase_ , end_index=lowerCAmelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowerCAmelCase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a ( self : Tuple , _lowerCAmelCase : List[int] , _lowerCAmelCase : List[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , ) -> Dict:
"""simple docstring"""
__lowercase = []
for start_index, start_score in enumerate(lowerCAmelCase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        __lowercase = sorted(lowerCAmelCase_ , key=lambda x : x[1] , reverse=lowerCAmelCase_ )
__lowercase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
__lowercase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__UpperCAmelCase )
class __UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase ):
__snake_case :Tuple = VOCAB_FILES_NAMES
__snake_case :Tuple = READER_PRETRAINED_VOCAB_FILES_MAP
__snake_case :Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case :Union[str, Any] = READER_PRETRAINED_INIT_CONFIGURATION
__snake_case :Dict = ['input_ids', 'attention_mask']
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    def __post_init__( self ) -> None:
        """simple docstring"""
        self.task_name = self.task_name.lower()
class Split(Enum ):
    train = 'train'
    dev = 'dev'
    test = 'test'
class GlueDataset(Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ) -> None:
        """simple docstring"""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
            else:
                logger.info(F'Creating features from dataset file at {args.data_dir}' )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        """simple docstring"""
        return self.features[i]
    def get_labels( self ) -> List[str]:
        """simple docstring"""
        return self.label_list
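# A hypothetical usage sketch (the task name, data path and checkpoint are
# illustrative; real GLUE data must exist under data_dir for this to run):
# from transformers import AutoTokenizer
# data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)
# print(len(train_dataset), train_dataset.get_labels())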
| 53
| 0
|
from math import ceil
def solution( n = 1_001 ):
    '''simple docstring'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 712
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None , metadata={'help': 'The input training data file (a text file).'} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
    line_by_line: bool = field(
        default=False , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
    mlm: bool = field(
        default=False , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    whole_word_mask: bool = field(default=False , metadata={'help': 'Whether or not to use whole word mask.'} )
    mlm_probability: float = field(
        default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
    block_size: int = field(
        default=-1 , metadata={
            'help': (
                'Optional input sequence length after tokenization. '
                'The training dataset will be truncated in block of this size for training. '
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def get_dataset( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
    '''simple docstring'''
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set whole word masking and mlm to True for Chinese Whole Word Mask""" )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it, and load it from here, using --tokenizer_name""" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
    # Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""] )
        result = {"""perplexity""": perplexity}
        output_eval_file = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key in sorted(result.keys() ):
                    logger.info(""" %s = %s""" , key , str(result[key] ) )
                    writer.write("""%s = %s\n""" % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn( index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
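# Hypothetical invocation of this legacy script (file paths are illustrative):
#   python run_language_modeling.py --model_name_or_path gpt2 \
#     --train_data_file train.txt --do_train --output_dir ./lm_out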
| 53
| 0
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__UpperCamelCase : int = re.compile(r"""\s+""")
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(UpperCAmelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats( example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}
def check_uniques( example , uniques ):
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""] )
        return True
    else:
        return False
def is_autogenerated( example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test( example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("""config""" )
        count_test += line.lower().count("""test""" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords( example ):
    '''simple docstring'''
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments( example , minimum=4 ):
    '''simple docstring'''
    lines = example["""content"""].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("""=""" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio( example ):
    '''simple docstring'''
    input_ids = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess( example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter( example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file( file_path ):
    '''simple docstring'''
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique("""hash"""))
frac = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
| 713
|
from __future__ import annotations
def snake_case ( nums ):
    '''simple docstring'''
    if len(nums ) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
    if any(i <= 0 for i in nums ):
        raise ValueError("""All values must be greater than 0""" )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
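# A quick worked example of the polygon inequality above: a triangle with
# sides 3, 4, 5 is valid (5 < 3 + 4) while 1, 1, 10 is not (10 >= 1 + 1), so
# snake_case([3, 4, 5]) -> True and snake_case([1, 1, 10]) -> False.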
| 53
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""test""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate test command""" )
    parser.add_argument(
        """--config_file""" , default=None , help=(
            """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
            """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
            """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
            """with 'huggingface'."""
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = F'--config_file={args.config_file} {script_name}'
    cmd = ["""accelerate-launch"""] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("""Test is a success! You are ready for your distributed training!""" )
def main( ):
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
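# Hypothetical CLI usage once wired into the `accelerate` entry point (the
# config path is illustrative):
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml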
| 714
|
from __future__ import annotations
def snake_case ( nums ):
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
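# A quick worked example of the non-adjacent maximum-sum recurrence above:
# for [1, 2, 3, 4, 5] the best non-adjacent picks are 1 + 3 + 5 = 9, so
# snake_case([1, 2, 3, 4, 5]) -> 9.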
| 53
| 0
|
def solution( limit = 1_000_000 ):
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
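# A small worked check of the totient sum above:
# solution(8) = phi(2) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.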
| 715
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch( checkpoint_repo , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo , filename="""pytorch_model.bin""" ) )
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("""roberta.""" ):
            tensor_key = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None , config=config , state_dict=state_dict )
    model.save_pretrained(pytorch_dump_folder_path )
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
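# Hypothetical invocation (the checkpoint repo comes from the help text above;
# the script filename and output path are illustrative):
#   python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm_dump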
| 53
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ) -> None:
        """simple docstring"""
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
"""simple docstring"""
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(_lowerCAmelCase )
    def test_attention_outputs( self ):
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )
                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ) -> None:
        """simple docstring"""
        # FlaxBigBird returns None for the attention probabilities under
        # block-sparse attention, so skip comparing them against PyTorch.
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 716
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
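# A quick shape check of the kernel builder above: an odd ksize is kept and an
# even ksize is bumped to the next odd value, so
# gabor_filter_kernel(3, 8, 0, 10, 0, 0).shape -> (3, 3) and
# gabor_filter_kernel(4, 8, 0, 10, 0, 0).shape -> (5, 5).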
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 53
| 0
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__UpperCamelCase : Union[str, Any] = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
"""simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.'
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ) -> List[DPRSpanPrediction]:
        """simple docstring"""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'Wrong span indices: [{start_index}:{end_index}]'
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'Span is too long: {length} > {max_answer_length}'
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
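# A hypothetical end-to-end sketch (the checkpoint names are the real DPR
# reader checkpoints referenced above, but loading them needs network access,
# and the question/passage strings are illustrative):
# from transformers import DPRReader
# tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
# model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
# encoded = tokenizer(
#     questions=["What is love?"],
#     titles=["Haddaway"],
#     texts=["'What Is Love' is a song recorded by Haddaway"],
#     return_tensors="pt",
# )
# outputs = model(**encoded)
# best_spans = tokenizer.decode_best_spans(encoded, outputs)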
| 717
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    '''simple docstring'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    '''simple docstring'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
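# Hypothetical invocation (the run id and token are illustrative):
#   python extract_warnings.py --workflow_run_id 1234567890 \
#     --output_dir ./warnings_out --token $GITHUB_TOKEN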
| 53
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ['PerceiverFeatureExtractor']
    _import_structure["image_processing_perceiver"] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
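# Usage sketch (assuming the standard `transformers` package layout): the `_LazyModule`
# registered above keeps `import transformers.models.perceiver` cheap; heavy backends
# load only on first attribute access, e.g.
#
#   from transformers.models.perceiver import PerceiverConfig  # configuration code only
#   from transformers.models.perceiver import PerceiverModel   # pulls in the torch-backed module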
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mra"""] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b]; pending values are pushed down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over the range [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
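    # A brute-force cross-check (a sketch, not part of the original demo): every
    # range-max the tree reports should match max() over the matching 1-based slice.
    #
    #   ref = A[:]
    #   check = SegmentTree(size)
    #   check.build(1, 1, size, ref)
    #   for a, b in [(4, 6), (7, 11), (1, 15)]:
    #       assert check.query(1, 1, size, a, b) == max(ref[a - 1 : b])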
def check_bouncy(n: int) -> bool:
    """
    Returns True if the digits of n are neither entirely non-decreasing
    nor entirely non-increasing.
    >>> check_bouncy(6789)
    False
    >>> check_bouncy(101)
    True
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """
    Returns the least number for which the proportion of bouncy numbers
    first reaches `percent`.
    >>> solution(50)
    538
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
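    # Hedged spot checks: monotone inputs are never bouncy, while a rise-fall-rise
    # pattern such as 155349 is.
    #
    #   assert not check_bouncy(1234) and not check_bouncy(4321)
    #   assert check_bouncy(155349)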
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """Check the prims algorithm output against a hand-verified MST."""
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])
    result = mst(adjacency)
__lowercase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512])
        print(F'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297])
        print(F'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
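    # The replicate()/shard() pair above is the standard JAX data-parallel recipe:
    # params are copied to every device and the batch is split along the leading axis.
    # A minimal shape sketch (using only the imports at the top of this file):
    #
    #   x = jnp.zeros((jax.device_count() * 2, 77))  # global batch of prompt ids
    #   shard(x).shape                               # -> (jax.device_count(), 2, 77)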
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list[int]) -> None:
    """Sorts the list in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
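    # Pigeonhole sort is a counting-sort variant: O(n + size) time and O(size) extra
    # space with size = max - min + 1, so it pays off only for dense integer ranges.
    # Negative values are fine because everything is offset by min_val (a sketch):
    #
    #   data = [-5, 0, -10, 3]
    #   pigeonhole_sort(data)
    #   assert data == [-10, -5, 0, 3]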
import heapq
import sys
import numpy as np
TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
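# Usage sketch for the queue above (kept as comments to avoid import-time side effects):
# put() inserts a new item, or rewrites the priority of an item that is already queued.
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((0, 0), 2)              # update: (0, 0) now carries priority 2
#   assert pq.get() == (2, (0, 0))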
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    # integer division by the time variable t
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    """Return True if point p lies inside the n x n grid."""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W1 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer,
                        )
                        close_list_anchor.append(get_s)
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
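    # Worked heuristic values for this 20x20 grid (goal = (19, 19)):
    #
    #   consistent_heuristic((0, 0), goal)  # euclidean: sqrt(19**2 + 19**2) ~= 26.87
    #   heuristic_2((0, 0), goal)           # manhattan: 19 + 19 = 38
    #   heuristic_1((0, 0), goal)           # euclidean // t, so it coarsens as t grows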
from math import sqrt
def is_prime(number):
    """
    input: positive integer 'number'
    returns true if 'number' is prime, otherwise false (trial division up to sqrt).
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowercase = list(range(2 , n + 1 ) )
__lowercase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase ) ):
for j in range(i + 1 , len(lowerCamelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowercase = 0
# filters actual prime numbers.
__lowercase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type list"
return ans
def get_prime_numbers(n):
    """
    input: positive integer 'n' > 2
    returns a list of all prime numbers from 2 up to n (inclusive).
    """
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """
    input: positive integer 'number'
    returns the prime factorization of 'number' as a list.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = max(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowercase = 0
# prime factorization of 'number'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = min(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ), "'ans' must been from type int"
return ans
def is_even(number):
    """
    input: integer 'number'
    returns true if 'number' is even, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    """
    input: integer 'number'
    returns true if 'number' is odd, otherwise false.
    """
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase ) and (number > 2) and is_even(lowerCamelCase )
), "'number' must been an int, even and > 2"
__lowercase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowercase = get_prime_numbers(lowerCamelCase )
__lowercase = len(lowerCamelCase )
# run variable for while-loops.
__lowercase = 0
__lowercase = None
# exit variable. for break up the loops
__lowercase = True
while i < len_pn and loop:
__lowercase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowercase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (len(lowerCamelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1, number2):
    """
    Greatest common divisor of two positive integers (euclidean algorithm).
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowercase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowercase = prime_factorization(lowerCamelCase )
__lowercase = prime_factorization(lowerCamelCase )
elif numbera == 1 or numbera == 1:
__lowercase = []
__lowercase = []
__lowercase = max(lowerCamelCase , lowerCamelCase )
__lowercase = 0
__lowercase = 0
__lowercase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowercase = prime_fac_a.count(lowerCamelCase )
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(max(lowerCamelCase , lowerCamelCase ) ):
ans *= n
else:
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowercase = prime_fac_a.count(lowerCamelCase )
for _ in range(lowerCamelCase ):
ans *= n
done.append(lowerCamelCase )
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'number' must been a positive int"
__lowercase = 0
__lowercase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase ):
ans += 1
# precondition
assert isinstance(lowerCamelCase , lowerCamelCase ) and is_prime(
lowerCamelCase ), "'ans' must been a prime number and from type int"
return ans
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase ) and is_prime(lowerCamelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowercase = p_number_a + 1 # jump to the next number
__lowercase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and ans[0] != p_number_a
and ans[len(lowerCamelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 1), "'n' must been int and >= 1"
__lowercase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (
number > 1
), "'number' must been an int and >= 1"
__lowercase = get_divisors(lowerCamelCase )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and isinstance(lowerCamelCase , lowerCamelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowercase = gcd(abs(lowerCamelCase ) , abs(lowerCamelCase ) )
# precondition
assert (
isinstance(lowerCamelCase , lowerCamelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been a int and >= 0"
__lowercase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def snake_case ( lowerCamelCase ):
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase ) and (n >= 0), "'n' must been an int and >= 0"
__lowercase = 0
__lowercase = 1
__lowercase = 1 # this will be return
for _ in range(n - 1 ):
__lowercase = ans
ans += fiba
__lowercase = tmp
return ans
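# Spot checks for the helpers above (hedged examples, easy to confirm by hand):
#
#   assert is_prime(97) and not is_prime(91)            # 91 = 7 * 13
#   assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
#   assert gcd(48, 36) == 12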
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""", out_features=["""stage1""", """stage2""", """stage3""", """stage4"""]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    return config


def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, """rb""") as f:
        data = pickle.load(f)
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, F'Unexpected keys: {unexpected_keys}'
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="""pt""")
    outputs = model(**inputs)
    print("""Logits:""", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
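    # Example invocation (hypothetical local paths; the actual script file name may differ):
    #
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
    #       --pytorch_dump_folder_path /path/to/converted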
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused")
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}], )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output), [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ], )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5, )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5)
        self.assertEqual(
            nested_simplify(output), [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5, )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
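    # Standalone usage sketch (assuming Hub access; candidate labels are free-form text
    # and the returned scores are softmax-normalised over the candidates):
    #
    #   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    #   classifier(audio_array, candidate_labels=["Sound of a dog", "Sound of rain"])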
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) time via the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
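# Spot checks (the 1st prime is 2 and the 6th is 13):
#
#   assert solution(1) == 2
#   assert solution(6) == 13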
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRegNetModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : int ) -> Any:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = FlaxRegNetForImageClassification(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Dict ) -> str:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def _a ( self : Any ) -> None:
"""simple docstring"""
__lowercase = FlaxRegNetModelTester(self )
        __lowercase = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def _a ( self : int ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
return
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _a ( self : List[str] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : int ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = model_class(_lowerCAmelCase )
@jax.jit
def model_jitted(_lowerCAmelCase : Any , **_lowerCAmelCase : Optional[int] ):
return model(pixel_values=_lowerCAmelCase , **_lowerCAmelCase )
with self.subTest("""JIT Enabled""" ):
__lowercase = model_jitted(**_lowerCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowercase = model_jitted(**_lowerCAmelCase ).to_tuple()
self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
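# Added note: this fixture is the standard COCO sample picture (two cats on a
# couch) used throughout the transformers test suite.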
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase ):
@cached_property
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = (1, 1000)
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
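# Added note: vision configs may store image_size / patch_size either as a
# single int or as a (height, width) iterable; to_atuple normalises both cases
# to a pair so the patch-count arithmetic below works uniformly.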
@require_tf
class TFVisionTextDualEncoderMixin:
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a: np.ndarray , b: np.ndarray , tol: float ) -> None:
        """simple docstring"""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs )
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname )
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_1(**inputs )
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1e-5 )
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        vit_model_tester = TFViTModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
        vit_model_tester = TFDeiTModelTester(self )
        bert_model_tester = TFRobertaModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
        clip_model_tester = TFCLIPVisionModelTester(self )
        bert_model_tester = TFBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = ["""MaskFormerFeatureExtractor"""]
__UpperCamelCase : Union[str, Any] = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__( self , list_of_points: list[tuple[float, float]] ) -> None:
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t: float ) -> list[float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
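    # Added note: the terms above are the Bernstein basis polynomials
    # b_{i,n}(t) = C(n, i) * t**i * (1 - t)**(n - i), which are non-negative on
    # [0, 1] and sum to 1 for every fixed t.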
    def bezier_curve_function( self , t: float ) -> tuple[float, float]:
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
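    # Added example: for the degree-1 curve BezierCurve([(1, 1), (3, 3)]),
    # bezier_curve_function(0.5) interpolates the two control points and
    # returns (2.0, 2.0).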
    def plot_curve( self , step_size: float = 0.01 ) -> None:
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
        plt.scatter(x , y , color="""red""" , label="""Control Points""" )
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
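# Added note: the kernel samples the real part of the Gabor function
# g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2))
#           * cos(2 * pi * x' / lambd + psi),
# where (x', y') are the pixel coordinates rotated by theta.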
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""" )
    # convert the image to gray scale values
    gray = cvtColor(img , COLOR_BGR2GRAY )
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2] )
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10 , 8 , theta , 10 , 0 , 0 )
        out += filter2D(gray , CV_8UC3 , kernel_10 )
    out = out / out.max() * 255
    out = out.astype(np.uint8 )
    imshow("""Original""" , gray )
    imshow("""Gabor filter with 20x20 mask and 6 directions""" , out )
    waitKey(0 )
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
if hasattr(self.model_tester , """encoder_seq_length""" ):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
                self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
        inputs_dict = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , """seq_length""" , None )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_len )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
        chunk_length = getattr(self.model_tester , """chunk_length""" , None )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
            config.output_attentions = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
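        # Added note: seeding np.random.RandomState makes the tiny pipeline
        # deterministic, which is what lets the tests below compare image slices
        # against hard-coded expected values.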
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
        __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers."""
    def __init__( self , *, # begin keyword-only arguments
                  bos="""<s>""" , pad="""<pad>""" , eos="""</s>""" , unk="""<unk>""" , extra_special_symbols=None , ) -> None:
        """simple docstring"""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ) -> bool:
        """simple docstring"""
        return self.indices == other.indices
    def __getitem__( self , idx ) -> str:
        """simple docstring"""
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self.symbols )
    def __contains__( self , sym ) -> bool:
        """simple docstring"""
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        """simple docstring"""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
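# Illustrative effect of rewrite_dict_keys (counts are made up): fairseq marks
# word continuations with a trailing "@@", while the converted vocab marks
# word endings with "</w>", and the special tokens are kept untouched, e.g.
#   {"hell@@": 10, "o": 5, "<s>": 0}  ->  {"hell": 10, "o</w>": 5, "<s>": 0}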
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
__lowercase = model_state_dict.pop(lowerCamelCase )
else:
__lowercase = model_state_dict.pop(lowerCamelCase )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
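# Illustrative invocation (the script file name is assumed), pointing at a
# fairseq dump directory that holds checkpoint.pt, dict.txt and bpecodes:
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt/checkpoints \
#       --pytorch_dump_folder_path /path/to/output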
| 53
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCamelCase : Optional[Any] = 16
__UpperCamelCase : Tuple = 32
def snake_case ( lowerCamelCase , lowerCamelCase = 16 ):
'''simple docstring'''
__lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowercase = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want lengths rounded to multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowercase = 16
elif accelerator.mixed_precision != "no":
__lowercase = 8
else:
__lowercase = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
__lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCamelCase : Optional[int] = mocked_dataloaders # noqa: F811
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase ) == "1":
__lowercase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, it should be passed in here, such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__lowercase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
__lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowercase = config["""lr"""]
__lowercase = int(config["""num_epochs"""] )
__lowercase = int(config["""seed"""] )
__lowercase = int(config["""batch_size"""] )
set_seed(lowerCamelCase )
__lowercase , __lowercase = get_dataloaders(lowerCamelCase , lowerCamelCase )
__lowercase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__lowercase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__lowercase = batch_size // MAX_GPU_BATCH_SIZE
__lowercase = MAX_GPU_BATCH_SIZE
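    # Illustrative numbers: with batch_size=64 and MAX_GPU_BATCH_SIZE=16,
    # gradient_accumulation_steps becomes 4 and each forward pass sees 16
    # samples, so the effective batch size is still 64 after accumulation.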
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowercase = model.to(accelerator.device )
# Instantiate optimizer
__lowercase = AdamW(params=model.parameters() , lr=lowerCamelCase )
# Instantiate scheduler
__lowercase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__lowercase = os.path.split(lowerCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(lowerCamelCase , lowerCamelCase )
# Now we train the model
for epoch in range(lowerCamelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__lowercase = 0
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__lowercase = model(**lowerCamelCase )
__lowercase = outputs.logits.argmax(dim=-1 )
__lowercase , __lowercase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
__lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(lowerCamelCase ),
"""epoch""": epoch,
} , step=lowerCamelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def snake_case ( ):
'''simple docstring'''
__lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=lowerCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
__lowercase = parser.parse_args()
__lowercase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
main()
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
__lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def _a ( self : Dict ) -> str:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 53
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = 'timm_backbone'
def __init__( self : str , _lowerCAmelCase : int=None , _lowerCAmelCase : Tuple=3 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : List[str] , ) -> int:
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
__lowercase = backbone
__lowercase = num_channels
__lowercase = features_only
__lowercase = use_pretrained_backbone
__lowercase = True
__lowercase = out_indices if out_indices is not None else (-1,)
| 708
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__lowercase = remove_duplicates(key.upper() )
__lowercase = len(lowerCamelCase )
# First fill cipher with key characters
__lowercase = {alphabet[i]: char for i, char in enumerate(lowerCamelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowerCamelCase ) , 26 ):
__lowercase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__lowercase = alphabet[i - offset]
__lowercase = char
return cipher_alphabet
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "".join(cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowerCamelCase , lowerCamelCase ) for ch in message.upper() )
def snake_case ( ):
'''simple docstring'''
__lowercase = input("""Enter message to encode or decode: """ ).strip()
__lowercase = input("""Enter keyword: """ ).strip()
__lowercase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower()
try:
__lowercase = {"""e""": encipher, """d""": decipher}[option]
except KeyError:
raise KeyError("""invalid input option""" )
__lowercase = create_cipher_map(lowerCamelCase )
print(func(lowerCamelCase , lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 53
| 0
|
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__UpperCamelCase : Tuple = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__UpperCamelCase : Dict = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__UpperCamelCase : Optional[int] = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def _a ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=None , _lowerCAmelCase : Dict=None , _lowerCAmelCase : int=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[int]="auto" , _lowerCAmelCase : Tuple=-1 , _lowerCAmelCase : Optional[int]=0.9 , _lowerCAmelCase : int=5 , _lowerCAmelCase : Tuple=500 , _lowerCAmelCase : List[Any]="gpt2-large" , _lowerCAmelCase : List[str]=-1 , _lowerCAmelCase : List[Any]=1024 , _lowerCAmelCase : Any=25 , _lowerCAmelCase : List[Any]=5 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Optional[int]=25 , ) -> Optional[int]:
"""simple docstring"""
__lowercase = compute_mauve(
p_text=_lowerCAmelCase , q_text=_lowerCAmelCase , p_features=_lowerCAmelCase , q_features=_lowerCAmelCase , p_tokens=_lowerCAmelCase , q_tokens=_lowerCAmelCase , num_buckets=_lowerCAmelCase , pca_max_data=_lowerCAmelCase , kmeans_explained_var=_lowerCAmelCase , kmeans_num_redo=_lowerCAmelCase , kmeans_max_iter=_lowerCAmelCase , featurize_model_name=_lowerCAmelCase , device_id=_lowerCAmelCase , max_text_length=_lowerCAmelCase , divergence_curve_discretization_size=_lowerCAmelCase , mauve_scaling_factor=_lowerCAmelCase , verbose=_lowerCAmelCase , seed=_lowerCAmelCase , )
return out
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=0 ) -> Any:
"""simple docstring"""
if str(_lowerCAmelCase ).startswith("""mps""" ):
__lowercase = torch.manual_seed(_lowerCAmelCase )
else:
__lowercase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 53
| 0
|
def snake_case ( lowerCamelCase = 1_000_000 ):
'''simple docstring'''
__lowercase = limit + 1
__lowercase = [0] * limit
for first_term in range(1 , lowerCamelCase ):
for n in range(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__lowercase = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1 # so z > 0 and a > d, also a < 4d
__lowercase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
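# Sketch of the algebra the loop above relies on: with the decreasing
# arithmetic progression x, y, z = a + d, a, a - d,
#   x^2 - y^2 - z^2 = (a + d)^2 - a^2 - (a - d)^2 = a * (4d - a) = n,
# so d = (a + n / a) / 4 must be a whole number (hence the "% 4" check),
# and 0 < d < a < 4d keeps x, y, z positive with n > 0.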
if __name__ == "__main__":
print(F'''{solution() = }''')
| 710
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
def _a ( self : Optional[int] , **_lowerCAmelCase : Any ) -> Tuple:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
| 53
| 0
|
from functools import lru_cache
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = 2
__lowercase = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(lowerCamelCase )
if n > 1:
factors.add(lowerCamelCase )
return factors
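# e.g. unique_prime_factors(100) == {2, 5}, since 100 = 2^2 * 5^2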
@lru_cache
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return len(unique_prime_factors(lowerCamelCase ) )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return len(set(lowerCamelCase ) ) in (0, 1)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = 2
while True:
# Increment each value of a generated range
__lowercase = [base + i for i in range(lowerCamelCase )]
        # Run elements through our unique_prime_factors function
# Append our target number to the end.
__lowercase = [upf_len(lowerCamelCase ) for x in group]
checker.append(lowerCamelCase )
# If all numbers in the list are equal, return the group variable.
if equality(lowerCamelCase ):
return group
# Increment our base variable by 1
base += 1
def snake_case ( lowerCamelCase = 4 ):
'''simple docstring'''
__lowercase = run(lowerCamelCase )
return results[0] if len(lowerCamelCase ) else None
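# Known Project Euler 47 values for reference: solution(2) == 14, since
# 14 = 2 * 7 and 15 = 3 * 5 are the first two consecutive integers with two
# distinct prime factors each, and solution(3) == 644 (644, 645, 646).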
if __name__ == "__main__":
print(solution())
| 711
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
| 53
| 0
|
# Function to print upper half of diamond (pyramid)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
for i in range(0 , lowerCamelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(""" """ , end="""""" )
for _ in range(0 , i + 1 ): # printing stars
print("""* """ , end="""""" )
print()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
for i in range(lowerCamelCase , 0 , -1 ):
for _ in range(lowerCamelCase , 0 , -1 ): # printing stars
print("""* """ , end="""""" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(""" """ , end="""""" )
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if n <= 0:
print(""" ... .... nothing printing :(""" )
return
floyd(lowerCamelCase ) # upper half
reverse_floyd(lowerCamelCase ) # lower half
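# Expected shape for pretty_print(3), a sketch with trailing spaces omitted:
#   *
#  * *
# * * *
# * * *
#  * *
#   *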
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
__UpperCamelCase : str = 1
while K:
    __UpperCamelCase : Optional[int] = int(input("""enter the number and see the magic : """))
print()
pretty_print(user_number)
__UpperCamelCase : str = int(input("""press 0 to exit... and 1 to continue..."""))
print("""Good Bye...""")
| 712
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
            'Very often splitting large files to smaller files can prevent the tokenizer from going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether or not to use whole word mask.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
            'Optional input sequence length after tokenization. '
            'The training dataset will be truncated in blocks of this size for training. '
            'Defaults to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
__lowercase = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
__lowercase = min(data_args.block_size , tokenizer.max_len )
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def snake_case ( lowerCamelCase ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 53
| 0
|
import os
import time
import numpy as np
import onnxruntime as ort
__UpperCamelCase : List[str] = """1"""
__UpperCamelCase : Any = """0"""
__UpperCamelCase : Dict = """1"""
__UpperCamelCase : int = ort.SessionOptions()
__UpperCamelCase : Optional[int] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
__UpperCamelCase : Dict = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
__UpperCamelCase : str = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
__UpperCamelCase : str = ort.RunOptions()
__UpperCamelCase : List[str] = 128
__UpperCamelCase : Any = 1
__UpperCamelCase : int = np.ones((batch, sequence), dtype=np.int64)
__UpperCamelCase : Any = np.ones((batch, sequence), dtype=np.int64)
__UpperCamelCase : Tuple = np.ones((batch, sequence), dtype=np.int64)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
__UpperCamelCase : Dict = time.time()
__UpperCamelCase : Optional[Any] = 2000
__UpperCamelCase : List[Any] = {}
for iter in range(max_iters):
__UpperCamelCase : Any = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 713
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if len(lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__lowercase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
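# A self-contained usage sketch of the polygon check above (the function name
# is obfuscated; `can_form_polygon` is an illustrative stand-in): side lengths
# can close into a polygon iff the largest side is strictly smaller than the
# sum of the remaining sides.
def can_form_polygon(sides: list) -> bool:
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert can_form_polygon([3, 4, 5])       # valid triangle
assert not can_form_polygon([1, 2, 10])  # 10 >= 1 + 2, cannot close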
| 53
| 0
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = checkpoint
__lowercase = {}
__lowercase = vae_state_dict["""encoder.conv_in.weight"""]
__lowercase = vae_state_dict["""encoder.conv_in.bias"""]
__lowercase = vae_state_dict["""encoder.conv_out.weight"""]
__lowercase = vae_state_dict["""encoder.conv_out.bias"""]
__lowercase = vae_state_dict["""encoder.norm_out.weight"""]
__lowercase = vae_state_dict["""encoder.norm_out.bias"""]
__lowercase = vae_state_dict["""decoder.conv_in.weight"""]
__lowercase = vae_state_dict["""decoder.conv_in.bias"""]
__lowercase = vae_state_dict["""decoder.conv_out.weight"""]
__lowercase = vae_state_dict["""decoder.conv_out.bias"""]
__lowercase = vae_state_dict["""decoder.norm_out.weight"""]
__lowercase = vae_state_dict["""decoder.norm_out.bias"""]
__lowercase = vae_state_dict["""quant_conv.weight"""]
__lowercase = vae_state_dict["""quant_conv.bias"""]
__lowercase = vae_state_dict["""post_quant_conv.weight"""]
__lowercase = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
__lowercase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
__lowercase = {
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(lowerCamelCase )
}
# Retrieves the keys for the decoder up blocks only
__lowercase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
__lowercase = {
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(lowerCamelCase )
}
for i in range(lowerCamelCase ):
__lowercase = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
__lowercase = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
__lowercase = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
__lowercase = renew_vae_resnet_paths(lowerCamelCase )
__lowercase = {"""old""": F'down.{i}.block', """new""": F'down_blocks.{i}.resnets'}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
__lowercase = [key for key in vae_state_dict if """encoder.mid.block""" in key]
__lowercase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowercase = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
__lowercase = renew_vae_resnet_paths(lowerCamelCase )
__lowercase = {"""old""": F'mid.block_{i}', """new""": F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
__lowercase = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
__lowercase = renew_vae_attention_paths(lowerCamelCase )
__lowercase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
conv_attn_to_linear(lowerCamelCase )
for i in range(lowerCamelCase ):
__lowercase = num_up_blocks - 1 - i
__lowercase = [
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
__lowercase = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
__lowercase = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
__lowercase = renew_vae_resnet_paths(lowerCamelCase )
__lowercase = {"""old""": F'up.{block_id}.block', """new""": F'up_blocks.{i}.resnets'}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
__lowercase = [key for key in vae_state_dict if """decoder.mid.block""" in key]
__lowercase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
__lowercase = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
__lowercase = renew_vae_resnet_paths(lowerCamelCase )
__lowercase = {"""old""": F'mid.block_{i}', """new""": F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
__lowercase = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
__lowercase = renew_vae_attention_paths(lowerCamelCase )
__lowercase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowerCamelCase , lowerCamelCase , lowerCamelCase , additional_replacements=[meta_path] , config=lowerCamelCase )
conv_attn_to_linear(lowerCamelCase )
return new_checkpoint
def snake_case ( lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__lowercase = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
__lowercase = io.BytesIO(r.content )
__lowercase = OmegaConf.load(lowerCamelCase )
__lowercase = 512
__lowercase = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
__lowercase = {}
with safe_open(lowerCamelCase , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
__lowercase = f.get_tensor(lowerCamelCase )
else:
__lowercase = torch.load(lowerCamelCase , map_location=lowerCamelCase )["""state_dict"""]
# Convert the VAE model.
__lowercase = create_vae_diffusers_config(lowerCamelCase , image_size=lowerCamelCase )
__lowercase = custom_convert_ldm_vae_checkpoint(lowerCamelCase , lowerCamelCase )
__lowercase = AutoencoderKL(**lowerCamelCase )
vae.load_state_dict(lowerCamelCase )
vae.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
__UpperCamelCase : Tuple = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
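# A minimal sketch of the key-renaming pattern the converter above relies on
# (illustrative keys, not the diffusers API): old LDM checkpoint keys are
# mapped onto diffusers-style keys by substring substitution.
state = {"encoder.down.0.block.0.conv1.weight": 0, "encoder.conv_in.weight": 1}
renamed = {k.replace("down.0.block", "down_blocks.0.resnets"): v for k, v in state.items()}
assert "encoder.down_blocks.0.resnets.0.conv1.weight" in renamed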
| 714
|
from __future__ import annotations
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not nums:
return 0
__lowercase = nums[0]
__lowercase = 0
for num in nums[1:]:
__lowercase , __lowercase = (
max_excluding + num,
max(lowerCamelCase , lowerCamelCase ),
)
return max(lowerCamelCase , lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
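# A worked example of the recurrence above with explicit names (the original
# is name-obfuscated): at each element, either take it on top of the best sum
# that excluded the previous element, or skip it.
def max_non_adjacent_sum(nums: list) -> int:
    if not nums:
        return 0
    including, excluding = nums[0], 0
    for num in nums[1:]:
        including, excluding = excluding + num, max(including, excluding)
    return max(including, excluding)

assert max_non_adjacent_sum([2, 7, 9, 3, 1]) == 12  # 2 + 9 + 1
assert max_non_adjacent_sum([1, 2, 3]) == 4         # 1 + 3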
| 53
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __UpperCamelCase :
def __init__( self : Optional[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int]=13 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : Any=16 , _lowerCAmelCase : Dict=[1, 2, 1] , _lowerCAmelCase : Optional[int]=[2, 2, 4] , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Any=2.0 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : Union[str, Any]=0.0 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : List[str]=1e-5 , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=True , _lowerCAmelCase : Tuple=10 , _lowerCAmelCase : str=8 , _lowerCAmelCase : str=["stage1", "stage2", "stage3"] , _lowerCAmelCase : List[str]=[1, 2, 3] , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = embed_dim
__lowercase = depths
__lowercase = num_heads
__lowercase = window_size
__lowercase = mlp_ratio
__lowercase = qkv_bias
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = drop_path_rate
__lowercase = hidden_act
__lowercase = use_absolute_embeddings
__lowercase = patch_norm
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = is_training
__lowercase = scope
__lowercase = use_labels
__lowercase = type_sequence_label_size
__lowercase = encoder_stride
__lowercase = out_features
__lowercase = out_indices
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _a ( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = MaskFormerSwinModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
__lowercase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowercase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = MaskFormerSwinBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowerCAmelCase ):
__lowercase = ["""stem"""]
__lowercase = MaskFormerSwinBackbone(config=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__snake_case :Union[str, Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__snake_case :str = False
__snake_case :int = False
__snake_case :Tuple = False
__snake_case :Tuple = False
__snake_case :Optional[Any] = False
def _a ( self : int ) -> Dict:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
pass
def _a ( self : Dict ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _a ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def _a ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# Swin has a different seq_length
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = 3
__lowercase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowercase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowercase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowercase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
self.check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _a ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _a ( self : int ) -> Tuple:
"""simple docstring"""
pass
def _a ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowerCAmelCase : Optional[int] ):
__lowercase = 0
return t
def check_equivalence(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : int={} ):
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase )
__lowercase = model(**_lowerCAmelCase , return_dict=_lowerCAmelCase , **_lowerCAmelCase ).to_tuple()
def recursive_check(_lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ):
if isinstance(_lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCAmelCase , _lowerCAmelCase ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowerCAmelCase ) , set_nan_tensor_to_zero(_lowerCAmelCase ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                        F' {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase ).any()}. Dict has'
F' `nan`: {torch.isnan(_lowerCAmelCase ).any()} and `inf`: {torch.isinf(_lowerCAmelCase )}.'
) , )
recursive_check(_lowerCAmelCase , _lowerCAmelCase )
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
check_equivalence(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {"""output_hidden_states""": True} )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
__snake_case :Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__snake_case :Any = MaskFormerSwinConfig
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = MaskFormerSwinModelTester(self )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__lowercase = backbone_class(_lowerCAmelCase )
backbone.to(_lowerCAmelCase )
backbone.eval()
__lowercase = backbone(**_lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowercase = backbone(**_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowercase , __lowercase , __lowercase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowercase = backbone(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 715
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def _a ( self : str ) -> int:
"""simple docstring"""
return self.label_list
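# A minimal sketch of the cache-under-FileLock pattern used in __init__ above
# (illustrative helper, assuming only the filelock package): the first process
# builds and pickles the features; concurrent processes block on the lock and
# then read the cache.
import os
import pickle
from filelock import FileLock

def cached_build(path, build_fn, overwrite=False):
    with FileLock(path + ".lock"):
        if os.path.exists(path) and not overwrite:
            with open(path, "rb") as f:
                return pickle.load(f)
        data = build_fn()
        with open(path, "wb") as f:
            pickle.dump(data, f)
        return data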
| 716
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
__lowercase = ksize + 1
    __lowercase = np.zeros((ksize, ksize) , dtype=np.float64 )
# each value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
    # convert image to gray scale
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
    __UpperCamelCase : List[str] = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
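# A cross-check sketch (assumption: OpenCV's built-in generator is comparable):
# cv2.getGaborKernel produces the same family of kernels, with theta given in
# radians rather than the degrees used by the handwritten function above.
import numpy as np
from cv2 import getGaborKernel, CV_64F

for theta_deg in [0, 30, 60, 90, 120, 150]:
    ref = getGaborKernel((11, 11), 8, np.deg2rad(theta_deg), 10, 0, 0, ktype=CV_64F)
    print(theta_deg, ref.shape)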
| 53
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __UpperCamelCase :
__snake_case :List[str]
__snake_case :Optional[str] = None
# Automatically constructed
__snake_case :ClassVar[str] = "dict"
__snake_case :ClassVar[Any] = None
__snake_case :str = field(default='Translation' , init=_lowerCAmelCase , repr=_lowerCAmelCase )
def __call__( self : Any ) -> List[str]:
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _a ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __UpperCamelCase :
__snake_case :Optional[List] = None
__snake_case :Optional[int] = None
__snake_case :Optional[str] = None
# Automatically constructed
__snake_case :ClassVar[str] = "dict"
__snake_case :ClassVar[Any] = None
__snake_case :str = field(default='TranslationVariableLanguages' , init=_lowerCAmelCase , repr=_lowerCAmelCase )
def _a ( self : Any ) -> str:
"""simple docstring"""
__lowercase = sorted(set(self.languages ) ) if self.languages else None
__lowercase = len(self.languages ) if self.languages else None
def __call__( self : Any ) -> str:
"""simple docstring"""
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def _a ( self : Tuple , _lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = set(self.languages )
if self.languages and set(_lowerCAmelCase ) - lang_set:
raise ValueError(
F'Some languages in example ({", ".join(sorted(set(_lowerCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_lowerCAmelCase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__lowercase = []
for lang, text in translation_dict.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__lowercase , __lowercase = zip(*sorted(_lowerCAmelCase ) )
return {"language": languages, "translation": translations}
def _a ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
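# A self-contained sketch of the flattening rule implemented above
# (illustrative name): multiple strings per language become parallel
# (language, translation) columns, ordered by language code.
def flatten_translations(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": languages, "translation": translations}

assert flatten_translations({"en": "the cat", "fr": ["le chat", "la chatte"]}) == {
    "language": ("en", "fr", "fr"),
    "translation": ("the cat", "la chatte", "le chat"),
}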
| 717
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
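# A minimal sketch of the matching rule used by parse_line above: a collected
# warning block is kept only when it contains ": <Target>: " for one of the
# requested warning categories.
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
warning = "utils.py:10: FutureWarning: this argument is deprecated"
assert any(F': {t}: ' in warning for t in targets)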
| 53
| 0
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
__UpperCamelCase : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(
default=_lowerCAmelCase , metadata={'help': 'Model type selected in the list: ' + ', '.join(_lowerCAmelCase )} )
__snake_case :str = field(
default=_lowerCAmelCase , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :int = field(
default=1_2_8 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
__snake_case :int = field(
default=6_4 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
__snake_case :int = field(
default=3_0 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__snake_case :float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__snake_case :int = field(
        default=2_0 , metadata={'help': 'The total number of n-best predictions to generate.'} )
__snake_case :int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
__snake_case :int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'train'
__snake_case :Union[str, Any] = 'dev'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :SquadDataTrainingArguments
__snake_case :List[SquadFeatures]
__snake_case :Split
__snake_case :bool
def __init__( self : Union[str, Any] , _lowerCAmelCase : SquadDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizer , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[bool] = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = "pt" , ) -> int:
"""simple docstring"""
__lowercase = args
__lowercase = is_language_sensitive
        __lowercase = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__lowercase = mode
# Load data features from cache or dataset file
__lowercase = """v2""" if args.version_2_with_negative else """v1"""
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__lowercase = self.old_features["""features"""]
__lowercase = self.old_features.get("""dataset""" , _lowerCAmelCase )
__lowercase = self.old_features.get("""examples""" , _lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
__lowercase , __lowercase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowerCAmelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowerCAmelCase , )
__lowercase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowerCAmelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : str ) -> str:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Any , _lowerCAmelCase : Union[str, Any] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
__lowercase = self.features[i]
__lowercase = torch.tensor(feature.input_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.attention_mask , dtype=torch.long )
__lowercase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__lowercase = torch.tensor(feature.cls_index , dtype=torch.long )
__lowercase = torch.tensor(feature.p_mask , dtype=torch.float )
__lowercase = torch.tensor(feature.is_impossible , dtype=torch.float )
__lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__lowercase = torch.tensor(feature.start_position , dtype=torch.long )
__lowercase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
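# A minimal sketch of the doc_stride windowing idea behind
# squad_convert_examples_to_features (illustrative helper): long contexts are
# split into overlapping token windows, each shifted by the stride.
def windows(n_tokens, max_len=128, stride=64):
    spans, start = [], 0
    while True:
        spans.append((start, min(start + max_len, n_tokens)))
        if start + max_len >= n_tokens:
            break
        start += stride
    return spans

assert windows(300, 128, 64) == [(0, 128), (64, 192), (128, 256), (192, 300)]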
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
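# A stripped-down sketch of the lazy-import idea behind _LazyModule
# (illustrative, not the transformers implementation): attribute access
# triggers the real import only on first use.
import importlib

class LazyAttr:
    def __init__(self, module_map):
        self._module_map = module_map  # attribute name -> module path
        self._cache = {}

    def __getattr__(self, name):
        if name not in self._cache:
            module = importlib.import_module(self._module_map[name])
            self._cache[name] = getattr(module, name)
        return self._cache[name]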
| 53
| 0
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __UpperCamelCase ( unittest.TestCase ):
    def _a ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        stage_names = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ["""c"""] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["""a""", """c"""] , None , stage_names )
        self.assertEqual(out_features , ["""a""", """c"""] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ["""a""", """c"""] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ["""a""", """c"""] )
        self.assertEqual(out_indices , [-3, -1] )

    def _a ( self : str ) -> int:
        """simple docstring"""
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ["""a""", """b"""] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ["""a"""] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )

    def _a ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        backbone = BackboneMixin()
        backbone.stage_names = ["""a""", """b""", """c"""]
        backbone._out_features = ["""a""", """c"""]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        backbone.out_features = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
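# For reference, the alignment rule exercised above: given stage_names
# ["a", "b", "c"], passing only out_indices=[0, 2] yields out_features
# ["a", "c"], passing only out_features=["a", "c"] yields out_indices [0, 2],
# and passing neither selects the last stage ("c" at index 2).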
def check_bouncy( lowerCamelCase ):
    '''simple docstring'''
    if not isinstance(lowerCamelCase , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(lowerCamelCase )
    sorted_str_n = """""".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution( percent = 99 ):
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F'''{solution(99)}''')
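# Quick sanity check of the definition above: 101 is the smallest bouncy
# number, while monotonic digit sequences such as 123 and 321 are not bouncy.
if __name__ == "__main__":
    assert check_bouncy(101)
    assert not check_bouncy(123)
    assert not check_bouncy(321)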
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode( word ):
    '''simple docstring'''
    encoded = """"""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
    return encoded


def decode( coded ):
    '''simple docstring'''
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
    decoded = """"""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
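# Round-trip sanity check for the cipher above: each letter encodes to a
# five-character group, and decode() inverts encode() word by word.
if __name__ == "__main__":
    assert decode(encode("""hello world""" ) ) == """hello world"""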
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
def solution( ):
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , """triangle.txt""" )

    with open(triangle_path ) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(""" """ ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )

    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number1 = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1 , number2 )
    return max(a[-1] )
if __name__ == "__main__":
print(solution())
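# The loop above is a bottom-up dynamic program: after processing row i, each
# a[i][j] holds the maximum path sum from the apex down to that cell, so the
# answer is the maximum of the last row. A tiny worked example:
#
#        3             3
#       7 4    ->    10  7
#      2 4 6        12 14 13    -> max = 14  (path 3 + 7 + 4)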
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(F'output_slice: {output_slice}' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger()
def convert_weight_and_push( hidden_sizes , name , config , save_directory , push_to_hub = True ):
    '''simple docstring'''
    print(F'Converting {name}...' )

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        from_logits = from_model(x )
        our_logits = our_model(x ).logits

    assert torch.allclose(from_logits , our_logits ), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name )

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
    '''simple docstring'''
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )

    names_to_hidden_sizes = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
class PriorityQueue:
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
    def minkey( self ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
    def empty( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
    def put( self , item , priority ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
else:
# update
# print("update", item)
            temp = []
            (pri , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri , x) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ) -> Dict:
"""simple docstring"""
if item in self.set:
            self.set.remove(item )
            temp = []
            (pro , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro , x) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
    def pop( self ) -> List[Any]:
        """simple docstring"""
        (priority , item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
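# Minimal usage sketch for the queue above: the heap stores (priority, item)
# tuples, so minkey() is the smallest priority and top_show() its item.
#
#     queue = PriorityQueue()
#     queue.put((0, 0), 5.0)
#     queue.put((1, 0), 2.0)
#     assert queue.minkey() == 2.0 and queue.top_show() == (1, 0)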
def consistent_heuristic( P , goal ):
    '''simple docstring'''
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )


def heuristic_a( P , goal ):
    '''simple docstring'''
    return consistent_heuristic(P , goal ) // t


def heuristic_b( p , goal ):
    '''simple docstring'''
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )


def key( start , i , goal , g_function ):
    '''simple docstring'''
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
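# Note on the priorities above: key() computes the Multi-Heuristic A* value
# g(s) + Wa * h_i(s), where h_0 (consistent_heuristic) is the consistent
# anchor heuristic and heuristic_a / heuristic_b are the two inadmissible
# ones; the anchor queue bounds suboptimality while the inadmissible queues
# speed up expansion.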
def do_something( back_pointer , goal , start ):
    '''simple docstring'''
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = """*"""

    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = """#"""

    grid[0][(n - 1)] = """-"""
    x = back_pointer[goal]
    while x != start:
        (x_c , y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = """-"""
        x = back_pointer[x]
    grid[(n - 1)][0] = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
    x = back_pointer[goal]
    while x != start:
        print(x , end=""" """ )
        x = back_pointer[x]
    print(x )
sys.exit()
def valid( p ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    '''simple docstring'''
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x , y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("""inf""" )

            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= Wa * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground( ):
    '''simple docstring'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_b}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star( start , goal , n_heuristic ):
    '''simple docstring'''
    g_function = {start: 0, goal: float("""inf""" )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )

    close_list_anchor = []
    close_list_inad = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )

    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )

    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser


def env_command( args ):
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()

    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()

    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'- {prop}: {val}' for prop, val in info.items()] ) )

    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'\t{accelerate_config}'
    )
    print(accelerate_config_str )

    info["""`Accelerate` configs"""] = accelerate_config

    return info


def main( ):
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
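# Typical invocation goes through the `accelerate` CLI, which registers this
# parser as a subcommand:
#
#     $ accelerate env
#     # prints versions, platform details, and the current default config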
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def get_maskformer_config( model_name ):
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config )

    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""

    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
            in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[
                -dim :, :
            ]
            state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
        state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    '''simple docstring'''
    config = get_maskformer_config(model_name )

    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()

    for name, param in model.named_parameters():
        print(name , param.shape )

    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )

    inputs = image_processor(image , return_tensors="""pt""" )

    outputs = model(**inputs )

    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(F'nielsr/{model_name}' )
        image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
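# Example invocation (the script and checkpoint paths are illustrative):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade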
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def get_config( model_name ):
    '''simple docstring'''
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = """std_conv""" if """bit""" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1_000 , id2label=id2label , label2id=label2id , )

    return config
def rename_key( name ):
'''simple docstring'''
if "stem.conv" in name:
__lowercase = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
__lowercase = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
__lowercase = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
__lowercase = """bit.""" + name
if "bit" not in name and "classifier" not in name:
__lowercase = """bit.encoder.""" + name
return name
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_config(model_name )

    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if """head""" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print("""Logits:""" , logits[0, :3] )
    print("""Predicted class:""" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F'Pushing model {model_name} and processor to the hub' )
        model.push_to_hub(F'ybelkada/{model_name}' )
        processor.push_to_hub(F'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
__UpperCamelCase : str = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
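# Example invocation (the script name is illustrative; the timm checkpoint is
# downloaded automatically):
#
#     python convert_bit_checkpoint.py --model_name resnetv2_50x1_bitm \
#         --pytorch_dump_folder_path ./bit-50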
from math import sqrt
def is_prime( number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution( nth = 10_001 ):
    '''simple docstring'''
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number ):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
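# Quick sanity checks for the 6k +/- 1 primality test and the counter above:
# the first five primes are 2, 3, 5, 7, 11, so solution(5) == 11.
if __name__ == "__main__":
    assert is_prime(2) and is_prime(3) and not is_prime(9)
    assert solution(5) == 11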
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> List[Any]:
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 30}
        crop_size = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) -> Union[str, Any]:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ) -> str:
        """simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize_and_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """crop_pct""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
def _a ( self : int ) -> str:
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
__lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple( x ):
    '''simple docstring'''
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class __UpperCamelCase :
def _a ( self : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def _a ( self : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : int ) -> str:
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _a ( self : str , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=None , **_lowerCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , **_lowerCAmelCase : str ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model(input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
def _a ( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
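# Worked example with hypothetical values: image_size=(30, 30), patch_size=(2, 2)
# -> num_patches = (30 // 2) * (30 // 2) = 225, so seq_len = 225 + 1 = 226.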
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : List[Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float ) -> Optional[int]:
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(_lowerCAmelCase , _lowerCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_lowerCAmelCase )
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_lowerCAmelCase )
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**_lowerCAmelCase )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_lowerCAmelCase )
@slow
def _a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(_lowerCAmelCase )
__lowercase = model_a(**_lowerCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1e-5 )
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = TFViTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=None , **_lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=_lowerCAmelCase , text_model=_lowerCAmelCase )
__lowercase = model(
input_ids=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , output_attentions=_lowerCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , vision_config.num_hidden_layers )
# in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
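# Worked example with hypothetical values: image_size=(30, 30), patch_size=(2, 2)
# -> num_patches = 225, so seq_len = 225 + 2 = 227 ([CLS] + distillation token).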
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(_lowerCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _a ( self : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> int:
"""simple docstring"""
__lowercase = TFDeiTModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
def _a ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 13
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = TFCLIPVisionModel(_lowerCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(_lowerCAmelCase , name="""text_model""" )
return vision_model, text_model
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=_lowerCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""np""" )
__lowercase = model(**_lowerCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _lowerCAmelCase , atol=1e-3 ) )
| 53
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Tuple = 'xlm'
__snake_case :Any = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : str , _lowerCAmelCase : Tuple=3_0145 , _lowerCAmelCase : Optional[Any]=2048 , _lowerCAmelCase : Any=12 , _lowerCAmelCase : List[Any]=16 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Any=False , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Dict=True , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : Tuple=2048**-0.5 , _lowerCAmelCase : int=1e-12 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : int=0 , _lowerCAmelCase : Dict=1 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]="first" , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Dict=True , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=5 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : Union[str, Any]=0 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[Any]=0 , **_lowerCAmelCase : str , ) -> Dict:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = emb_dim
__lowercase = n_layers
__lowercase = n_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = gelu_activation
__lowercase = sinusoidal_embeddings
__lowercase = causal
__lowercase = asm
__lowercase = n_langs
__lowercase = use_lang_emb
__lowercase = layer_norm_eps
__lowercase = bos_index
__lowercase = eos_index
__lowercase = pad_index
__lowercase = unk_index
__lowercase = mask_index
__lowercase = is_encoder
__lowercase = max_position_embeddings
__lowercase = embed_init_std
__lowercase = init_std
__lowercase = summary_type
__lowercase = summary_use_proj
__lowercase = summary_activation
__lowercase = summary_proj_to_labels
__lowercase = summary_first_dropout
__lowercase = start_n_top
__lowercase = end_n_top
__lowercase = mask_token_id
__lowercase = lang_id
if "n_words" in kwargs:
__lowercase = kwargs["""n_words"""]
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class __UpperCamelCase ( _lowerCAmelCase ):
@property
def _a ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
__lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
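# For any task other than multiple-choice, the property above resolves to:
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"}),
#              ("token_type_ids", {0: "batch", 1: "sequence"})])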
| 704
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : list[tuple[float, float]] ) -> Any:
"""simple docstring"""
__lowercase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
__lowercase = len(_lowerCAmelCase ) - 1
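# e.g. two control points give degree 1 (a straight line), three give degree 2
# (a quadratic), and in general degree = number of control points - 1.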
def _a ( self : Tuple , _lowerCAmelCase : float ) -> list[float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , _lowerCAmelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(_lowerCAmelCase ) , 5 ) == 1
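# Worked example for a degree-2 curve at t = 0.5 (illustrative values):
# comb(2, 0) * 0.5**2 = 0.25, comb(2, 1) * 0.5 * 0.5 = 0.5, comb(2, 2) * 0.5**2 = 0.25,
# i.e. [0.25, 0.5, 0.25], which sums to 1 as the assertion above requires.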
return output_values
def _a ( self : List[str] , _lowerCAmelCase : float ) -> tuple[float, float]:
"""simple docstring"""
assert 0 <= t <= 1, "Time t must be between 0 and 1."
__lowercase = self.basis_function(_lowerCAmelCase )
__lowercase = 0.0
__lowercase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
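# Worked example (illustrative): for control points [(1, 2), (3, 5)] at t = 0.5
# the basis is [0.5, 0.5], so the curve point is the midpoint (2.0, 3.5).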
def _a ( self : Optional[int] , _lowerCAmelCase : float = 0.01 ) -> Union[str, Any]:
"""simple docstring"""
from matplotlib import pyplot as plt # type: ignore
__lowercase = [] # x coordinates of points to plot
__lowercase = [] # y coordinates of points to plot
__lowercase = 0.0
while t <= 1:
__lowercase = self.bezier_curve_function(_lowerCAmelCase )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
__lowercase = [i[0] for i in self.list_of_points]
__lowercase = [i[1] for i in self.list_of_points]
plt.plot(
_lowerCAmelCase , _lowerCAmelCase , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , )
plt.scatter(_lowerCAmelCase , _lowerCAmelCase , color="""red""" , label="""Control Points""" )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 53
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__UpperCamelCase : Tuple = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : str=False , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : List[Any]=6.0 , _lowerCAmelCase : int=None , _lowerCAmelCase : Dict=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple="fp4" , _lowerCAmelCase : List[str]=False , **_lowerCAmelCase : Any , ) -> Dict:
"""simple docstring"""
__lowercase = load_in_8bit
__lowercase = load_in_4bit
__lowercase = llm_inta_threshold
__lowercase = llm_inta_skip_modules
__lowercase = llm_inta_enable_fpaa_cpu_offload
__lowercase = llm_inta_has_fpaa_weight
__lowercase = bnb_abit_quant_type
__lowercase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
__lowercase = torch.floataa
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase )
elif isinstance(_lowerCAmelCase , torch.dtype ):
__lowercase = bnb_abit_compute_dtype
else:
raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
self.post_init()
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , _lowerCAmelCase ):
raise ValueError("""llm_int8_threshold must be a float""" )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _lowerCAmelCase ):
raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _lowerCAmelCase ):
raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
if not isinstance(self.llm_inta_has_fpaa_weight , _lowerCAmelCase ):
raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
if not isinstance(self.bnb_abit_quant_type , _lowerCAmelCase ):
raise ValueError("""bnb_4bit_quant_type must be a string""" )
if not isinstance(self.bnb_abit_use_double_quant , _lowerCAmelCase ):
raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
if self.load_in_4bit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
"""0.39.0""" ):
raise ValueError(
"""4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )
def _a ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.load_in_8bit or self.load_in_4bit
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
if self.load_in_8bit:
return "llm_int8"
elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
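# In short: 8-bit loading maps to "llm_int8"; 4-bit loading maps to "fp4" or
# "nf4" depending on bnb_abit_quant_type; with neither flag set, None is returned.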
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , **_lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = cls(**_lowerCAmelCase )
__lowercase = []
for key, value in kwargs.items():
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
to_remove.append(_lowerCAmelCase )
for key in to_remove:
kwargs.pop(_lowerCAmelCase , _lowerCAmelCase )
if return_unused_kwargs:
return config, kwargs
else:
return config
def _a ( self : Union[str, Any] , _lowerCAmelCase : Union[str, os.PathLike] ) -> Tuple:
"""simple docstring"""
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
__lowercase = self.to_dict()
__lowercase = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + """\n"""
writer.write(_lowerCAmelCase )
def _a ( self : List[str] ) -> Dict[str, Any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
return output
def __repr__( self : int ) -> List[str]:
"""simple docstring"""
return F'{self.__class__.__name__} {self.to_json_string()}'
def _a ( self : List[Any] , _lowerCAmelCase : bool = True ) -> str:
"""simple docstring"""
if use_diff is True:
__lowercase = self.to_diff_dict()
else:
__lowercase = self.to_dict()
return json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
def _a ( self : str ) -> Dict[str, Any]:
"""simple docstring"""
__lowercase = self.to_dict()
# get the default config dict
__lowercase = BitsAndBytesConfig().to_dict()
__lowercase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
__lowercase = value
return serializable_config_dict
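# Illustrative example: if every field matches the defaults except one load flag,
# to_diff_dict() returns just that single key/value pair instead of the full dict.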
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __UpperCamelCase :
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
def _a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = TFEfficientFormerModel(config=_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.type_sequence_label_size
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = TFEfficientFormerForImageClassification(_lowerCAmelCase )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__snake_case :Any = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__snake_case :int = False
__snake_case :Optional[int] = False
__snake_case :int = False
__snake_case :Any = False
__snake_case :Any = False
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerModelTester(self )
__lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def _a ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def _a ( self : int ) -> str:
"""simple docstring"""
pass
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] ):
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__lowercase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__lowercase = seq_length * self.model_tester.chunk_length
else:
__lowercase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__lowercase = outputs.decoder_hidden_states
self.assertIsInstance(_lowerCAmelCase , (list, tuple) )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """decoder_seq_length""" , _lowerCAmelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
__lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = TFEfficientFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , """seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """encoder_seq_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """key_length""" , _lowerCAmelCase )
__lowercase = getattr(self.model_tester , """chunk_length""" , _lowerCAmelCase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__lowercase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_lowerCAmelCase )
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) , training=_lowerCAmelCase )
__lowercase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__lowercase = model_class(_lowerCAmelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__lowercase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowerCAmelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
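# Illustrative: an input_signature entry of shape (None, 3, 64, 64) becomes
# tf.keras.Input(shape=(3, 64, 64), ...), keeping the batch dimension symbolic.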
__lowercase = model(_lowerCAmelCase )
self.assertTrue(outputs_dict is not None )
def snake_case ( ):
'''simple docstring'''
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""tf""" )
# forward pass
__lowercase = model(**_lowerCAmelCase , training=_lowerCAmelCase )
# verify the logits
__lowercase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 53
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : Dict=7 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=99 , _lowerCAmelCase : Optional[int]=32 , _lowerCAmelCase : int=5 , _lowerCAmelCase : List[str]=4 , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Optional[Any]=512 , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : int=4 , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self : Any ) -> int:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :int = True
__snake_case :Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : str ) -> str:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__lowercase = model(_lowerCAmelCase )[0]
__lowercase = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , _lowerCAmelCase )
# compare the actual values for a slice.
__lowercase = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
@slow
def _a ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=_lowerCAmelCase )
__lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
__lowercase = model(_lowerCAmelCase )[0]
# compare the actual values for a slice.
__lowercase = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 706
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__UpperCamelCase : Tuple = 2
class __UpperCamelCase :
def __init__( self : List[str] , *, # begin keyword-only arguments
_lowerCAmelCase : Optional[int]="<s>" , _lowerCAmelCase : Optional[int]="<pad>" , _lowerCAmelCase : int="</s>" , _lowerCAmelCase : str="<unk>" , _lowerCAmelCase : List[str]=None , ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase , __lowercase , __lowercase = bos, unk, pad, eos
__lowercase = []
__lowercase = []
__lowercase = {}
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
__lowercase = self.add_symbol(_lowerCAmelCase )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_lowerCAmelCase )
__lowercase = len(self.symbols )
def __eq__( self : Dict , _lowerCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self : Any , _lowerCAmelCase : str ) -> Dict:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> List[str]:
"""simple docstring"""
return len(self.symbols )
def __contains__( self : Union[str, Any] , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls : Dict , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = cls()
d.add_from_file(_lowerCAmelCase )
return d
def _a ( self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
__lowercase = self.indices[word]
__lowercase = self.count[idx] + n
return idx
else:
__lowercase = len(self.symbols )
__lowercase = idx
self.symbols.append(_lowerCAmelCase )
self.count.append(_lowerCAmelCase )
return idx
def _a ( self : List[str] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 0
def _a ( self : Optional[Any] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
with open(_lowerCAmelCase , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_lowerCAmelCase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_lowerCAmelCase ) )
return
__lowercase = f.readlines()
__lowercase = self._load_meta(_lowerCAmelCase )
for line in lines[indices_start_line:]:
try:
__lowercase , __lowercase = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
__lowercase = True
__lowercase , __lowercase = line.rsplit(""" """ , 1 )
else:
__lowercase = False
__lowercase = int(_lowerCAmelCase )
__lowercase = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_lowerCAmelCase ) )
self.add_symbol(_lowerCAmelCase , n=_lowerCAmelCase , overwrite=_lowerCAmelCase )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
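# Each line of a fairseq dict file is "<token> <cnt>", optionally suffixed with
# "#fairseq:overwrite" to permit redefining an earlier entry, e.g. (illustrative):
# "hello 42" or "world 7 #fairseq:overwrite".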
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , lowerCamelCase ), v) for k, v in d.items() )
__lowercase = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
__lowercase = d[k] # restore
return da
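# Worked example (hypothetical values): {"<s>": 0, "hell@@": 5, "o": 6} first
# becomes {"<s></w>": 0, "hell": 5, "o</w>": 6}; the special-token loop then
# deletes "<s></w>" and restores "<s>": 0.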
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if not os.path.exists(lowerCamelCase ):
raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' )
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
__lowercase = os.path.join(lowerCamelCase , """checkpoint.pt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {checkpoint_file} does not exist!' )
__lowercase = torch.load(lowerCamelCase , map_location="""cpu""" )
__lowercase = chkpt["""cfg"""]["""model"""]
# dicts
__lowercase = os.path.join(lowerCamelCase , """dict.txt""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {dict_file} does not exist!' )
__lowercase = Dictionary.load(lowerCamelCase )
__lowercase = rewrite_dict_keys(src_dict.indices )
__lowercase = len(lowerCamelCase )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'Generating {src_vocab_file} of {src_vocab_size} records' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# merges_file (bpecodes)
__lowercase = os.path.join(lowerCamelCase , """bpecodes""" )
if not os.path.isfile(lowerCamelCase ):
raise ValueError(F'path to the file {bpecodes_file} does not exist!' )
__lowercase = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(lowerCamelCase , lowerCamelCase )
# model config
__lowercase = os.path.join(lowerCamelCase , """config.json""" )
__lowercase = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'Generating {biogpt_model_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# tokenizer config
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
__lowercase = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1_024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'Generating {biogpt_tokenizer_config_file}' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase ) )
# model
__lowercase = chkpt["""model"""]
# remove unneeded keys
__lowercase = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase )
__lowercase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
# the output projection keeps its name, minus the "decoder." prefix
model_state_dict[layer_name.replace("""decoder.""" , """""" )] = model_state_dict.pop(layer_name )
else:
# every other decoder weight is re-keyed under the "biogpt" prefix
model_state_dict[layer_name.replace("""decoder""" , """biogpt""" )] = model_state_dict.pop(layer_name )
__lowercase = BioGptConfig.from_pretrained(lowerCamelCase )
__lowercase = BioGptForCausalLM(lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase )
# save
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowerCamelCase , lowerCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 53
| 0
|
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return "\n".join(
F'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
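# Prints "5 * 1 = 5" through "5 * 10 = 50", one line per term.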
| 707
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Union[str, Any] = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def _a ( self : Any , _lowerCAmelCase : str=0 ) -> str:
"""simple docstring"""
__lowercase = np.random.RandomState(_lowerCAmelCase )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**_lowerCAmelCase ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
        __lowercase = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
__lowercase = prompt_embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * ["""this is a negative prompt"""]
__lowercase = negative_prompt
__lowercase = 3 * [inputs["""prompt"""]]
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
__lowercase = self.get_dummy_inputs()
__lowercase = 3 * [inputs.pop("""prompt""" )]
__lowercase = []
for p in [prompt, negative_prompt]:
__lowercase = pipe.tokenizer(
_lowerCAmelCase , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="""np""" , )
__lowercase = text_inputs["""input_ids"""]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
__lowercase , __lowercase = embeds
# forward
__lowercase = pipe(**_lowerCAmelCase )
__lowercase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
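# Note on the two prompt-embedding tests above (an explanatory sketch): they
# tokenize and encode the prompt (and negative prompt) manually, feed the
# resulting `prompt_embeds`/`negative_prompt_embeds` back into the pipeline,
# and assert that the generated images match the ones produced from the raw
# prompt strings.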
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """open neural network exchange"""
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCAmelCase , output_type="""np""" )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase = 0
def test_callback_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """Andromeda galaxy in a bottle"""
__lowercase = np.random.RandomState(0 )
pipe(
prompt=_lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = XLNetTokenizer
__snake_case :int = XLNetTokenizerFast
__snake_case :Union[str, Any] = True
__snake_case :str = True
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = XLNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase = """<s>"""
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase ) , _lowerCAmelCase )
def _a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(_lowerCAmelCase ) , 1006 )
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = XLNetTokenizer(_lowerCAmelCase , keep_accents=_lowerCAmelCase )
__lowercase = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , [285, 46, 10, 170, 382] )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCAmelCase )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = XLNetTokenizer(_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def _a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = XLNetTokenizer(_lowerCAmelCase , do_lower_case=_lowerCAmelCase )
__lowercase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
__lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
def remove_duplicates(key: str) -> str:
    """Keep only the first occurrence of each letter in `key`; spaces are preserved."""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Return a substitution map built from the keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher `message` with the substitution map; non-letters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher `message` by inverting the substitution map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Handle I/O for enciphering or deciphering a message with a keyword."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
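
# Illustrative behaviour of the helpers above (a sketch, not executed here):
# remove_duplicates("HELLO WORLD") returns "HELO WRD" (first occurrences only,
# spaces kept), and for any keyword K and message M,
# decipher(encipher(M, cm), cm) with cm = create_cipher_map(K) recovers
# M.upper() on the alphabetic characters.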
""" BertAbs configuration """
import logging
from transformers import PretrainedConfig
__UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
__UpperCamelCase : Tuple = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Any = IFInpaintingPipeline
__snake_case :str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__snake_case :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__snake_case :str = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def test_save_load_float16(self):
        # loose tolerance due to non-determinism in fp16 save/load
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local(self):
self._test_save_load_local()
    def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask for the T5 blocks
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
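
# Shape sketch (inferred from the calls above, not asserted by the module):
# `encoder_input_tokens` is an integer tensor of shape (batch, seq_len); the
# returned hidden states are (batch, seq_len, d_model), and the second return
# value is the unchanged (batch, seq_len) input mask.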
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :str = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self : str ) -> int:
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.0_010_011 < 1e-5
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
__lowercase = scheduler.timesteps
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
__lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
__lowercase = None
else:
__lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
__lowercase = pred_prev_sample
__lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
__lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def _a ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : int ) -> List[str]:
"""simple docstring"""
pass
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into a transformers checkpoint."""
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(F'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__UpperCamelCase : Any = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase :
__snake_case :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__snake_case :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__snake_case :int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    def __post_init__(self):
        self.task_name = self.task_name.lower()
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Optional[int] = 'train'
__snake_case :int = 'dev'
__snake_case :Any = 'test'
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :GlueDataTrainingArguments
__snake_case :str
__snake_case :List[InputFeatures]
def __init__( self : Dict , _lowerCAmelCase : GlueDataTrainingArguments , _lowerCAmelCase : PreTrainedTokenizerBase , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Union[str, Split] = Split.train , _lowerCAmelCase : Optional[str] = None , ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , _lowerCAmelCase , )
__lowercase = args
__lowercase = glue_processors[args.task_name]()
__lowercase = glue_output_modes[args.task_name]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__lowercase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__lowercase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , )
__lowercase = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__lowercase , __lowercase = label_list[2], label_list[1]
__lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowercase = cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not args.overwrite_cache:
__lowercase = time.time()
__lowercase = torch.load(_lowerCAmelCase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
else:
logger.info(F'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
__lowercase = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__lowercase = self.processor.get_test_examples(args.data_dir )
else:
__lowercase = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__lowercase = examples[:limit_length]
__lowercase = glue_convert_examples_to_features(
_lowerCAmelCase , _lowerCAmelCase , max_length=args.max_seq_length , label_list=_lowerCAmelCase , output_mode=self.output_mode , )
__lowercase = time.time()
torch.save(self.features , _lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Dict ) -> Optional[int]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowerCAmelCase : Optional[int] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
    def get_labels(self):
        return self.label_list
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n` (Project Euler problem 10)."""
    # Sieve of Eratosthenes: 0 marks "prime so far", 1 marks composite.
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(F'''{solution() = }''')
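
# Quick sanity sketch on a small bound: solution(10) sums the primes below 10
# (2 + 3 + 5 + 7) and returns 17.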
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase : List[Any] = logging.getLogger(__name__)
__UpperCamelCase : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__UpperCamelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCAmelCase )} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'The input training data file (a text file).'} )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__snake_case :Optional[str] = field(
default=_lowerCAmelCase , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
__snake_case :bool = field(default=_lowerCAmelCase , metadata={'help': 'Whether ot not to use whole word mask.'} )
__snake_case :float = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__snake_case :float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__snake_case :int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__snake_case :int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
__snake_case :bool = field(
default=_lowerCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , ):
'''simple docstring'''
def _dataset(lowerCamelCase , lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , ref_path=lowerCamelCase , )
return LineByLineTextDataset(tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=lowerCamelCase , file_path=lowerCamelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(lowerCamelCase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ):
'''simple docstring'''
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
__lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
__lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
__lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
__lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
__lowercase = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
__lowercase = AutoModelWithLMHead.from_config(lowerCamelCase )
model.resize_token_embeddings(len(lowerCamelCase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
__lowercase = (
get_dataset(lowerCamelCase , tokenizer=lowerCamelCase , evaluate=lowerCamelCase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
__lowercase = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCamelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
__lowercase = DataCollatorForWholeWordMask(
tokenizer=lowerCamelCase , mlm_probability=data_args.mlm_probability )
else:
__lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCamelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__lowercase = Trainer(
model=lowerCamelCase , args=lowerCamelCase , data_collator=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , prediction_loss_only=lowerCamelCase , )
# Training
if training_args.do_train:
__lowercase = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCamelCase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__lowercase = trainer.evaluate()
__lowercase = math.exp(eval_output["""eval_loss"""] )
__lowercase = {"""perplexity""": perplexity}
__lowercase = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCamelCase )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    """Exercise send_file end-to-end with both the socket and the file mocked."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCamelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True iff the side lengths in `nums` can close into a simple polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    # The longest side must be strictly shorter than the sum of the others
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest of the two) weight vector for a sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move winning vector `j` towards the sample with learning rate `alpha`."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(F'Clusters that the test sample belongs to : {winner}')
    print(F'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum over subsets of `nums` with no two adjacent elements."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
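
# Worked example (a sketch): for nums = [2, 7, 9, 3, 1] the best non-adjacent
# selection is 2 + 9 + 1 = 12, which is what the running pair
# (max_including, max_excluding) converges to.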
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis: theta_0 + sum_i theta_i * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
__lowercase = torch.load(hf_hub_download(repo_id=lowerCamelCase , filename="""pytorch_model.bin""" ) )
__lowercase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
__lowercase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights which are not used; remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
__lowercase = tensor_value
__lowercase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase , config=lowerCamelCase , state_dict=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
# convert tokenizer
__lowercase = AutoTokenizer.from_pretrained(lowerCamelCase )
tokenizer.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 53
| 0
|
import requests
__UpperCamelCase : Optional[Any] = """""" # <-- Put your OpenWeatherMap appid here!
__UpperCamelCase : Optional[int] = """https://api.openweathermap.org/data/2.5/"""
def snake_case ( lowerCamelCase = "Chicago" , lowerCamelCase = APPID ):
'''simple docstring'''
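    # locals() captures the function arguments and forwards them unchanged as
    # the query-string parameters of the API request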
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def snake_case ( lowerCamelCase = "Kolkata, India" , lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def snake_case ( lowerCamelCase = 55.68 , lowerCamelCase = 12.57 , lowerCamelCase = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__UpperCamelCase : List[Any] = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 716
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
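    # build a ksize x ksize Gabor kernel: rotate each pixel offset by theta,
    # then multiply a Gaussian envelope (scale sigma, aspect ratio gamma) by a
    # sinusoidal carrier of wavelength lambd and phase offset psi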
if (ksize % 2) == 0:
__lowercase = ksize + 1
__lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
    # compute each kernel value
for y in range(lowerCamelCase ):
for x in range(lowerCamelCase ):
# distance from center
__lowercase = x - ksize // 2
__lowercase = y - ksize // 2
# degree to radiant
__lowercase = theta / 180 * np.pi
__lowercase = np.cos(_theta )
__lowercase = np.sin(_theta )
# get kernel x
__lowercase = cos_theta * px + sin_theta * py
# get kernel y
__lowercase = -sin_theta * px + cos_theta * py
# fill kernel
__lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCamelCase : List[Any] = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
__UpperCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCamelCase : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCamelCase : Tuple = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCamelCase : List[str] = out / out.max() * 255
__UpperCamelCase : List[str] = out.astype(np.uinta)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 53
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
__UpperCamelCase : Union[str, Any] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
__UpperCamelCase : int = rh.cluster(
name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path}
)
else:
__UpperCamelCase : Dict = rh.cluster(
name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__UpperCamelCase : int = args.example.rsplit("""/""", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 717
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = []
def parse_line(lowerCamelCase ):
for line in fp:
if isinstance(lowerCamelCase , lowerCamelCase ):
__lowercase = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(lowerCamelCase ) > 0:
__lowercase = """\n""".join(lowerCamelCase )
# Only keep the warnings specified in `targets`
if any(F': {x}: ' in warning for x in targets ):
selected_warnings.add(lowerCamelCase )
buffer.clear()
continue
else:
__lowercase = line.strip()
buffer.append(lowerCamelCase )
if from_gh:
for filename in os.listdir(lowerCamelCase ):
__lowercase = os.path.join(lowerCamelCase , lowerCamelCase )
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
else:
try:
with zipfile.ZipFile(lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(lowerCamelCase ) as fp:
parse_line(lowerCamelCase )
except Exception:
logger.warning(
F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = set()
__lowercase = [os.path.join(lowerCamelCase , lowerCamelCase ) for p in os.listdir(lowerCamelCase ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowerCamelCase , lowerCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def snake_case ( lowerCamelCase ):
'''simple docstring'''
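        # argparse `type=` helper: split a comma-separated CLI value into a list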
return values.split(""",""" )
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__UpperCamelCase : List[str] = parser.parse_args()
__UpperCamelCase : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__UpperCamelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__UpperCamelCase : Union[str, Any] = extract_warnings(args.output_dir, args.targets)
__UpperCamelCase : Any = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 53
| 0
|
import re
def snake_case ( lowerCamelCase ):
'''simple docstring'''
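    # Indian mobile numbers: an optional "+91" / "91" / "0" prefix followed by
    # ten digits whose first digit is 7, 8 or 9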
__lowercase = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
if match := re.search(lowerCamelCase , lowerCamelCase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("""+918827897895"""))
| 718
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase : Any = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 53
| 0
|
import qiskit
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
__lowercase = qiskit.QuantumCircuit(lowerCamelCase , lowerCamelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
__lowercase = qiskit.execute(lowerCamelCase , lowerCamelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 719
|
def snake_case ( lowerCamelCase ):
'''simple docstring'''
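    # a bouncy number is neither monotonically increasing nor decreasing, i.e.
    # it differs from both its sorted and reverse-sorted digit strings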
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 53
| 0
|
import math
import qiskit
def snake_case ( lowerCamelCase = 1 , lowerCamelCase = 1 , lowerCamelCase = 1 ):
'''simple docstring'''
if (
isinstance(lowerCamelCase , lowerCamelCase )
or isinstance(lowerCamelCase , lowerCamelCase )
or isinstance(lowerCamelCase , lowerCamelCase )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(lowerCamelCase ) != input_a)
or (math.floor(lowerCamelCase ) != input_a)
or (math.floor(lowerCamelCase ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
__lowercase = qiskit.QuantumRegister(4 , """qr""" )
__lowercase = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
__lowercase = [input_a, input_a, carry_in]
__lowercase = qiskit.QuantumCircuit(lowerCamelCase , lowerCamelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCamelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCamelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCamelCase ) # for 0 entries
# build the circuit
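    # the two Toffolis accumulate the carry-out on qubit 3, while the CNOTs
    # fold the input parities so the sum bit ends up on qubit 2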
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCamelCase ) # measure the last two qbits
__lowercase = qiskit.Aer.get_backend("""aer_simulator""" )
__lowercase = qiskit.execute(lowerCamelCase , lowerCamelCase , shots=1_000 )
return job.result().get_counts(lowerCamelCase )
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase : Tuple = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 53
| 0
|
'''simple docstring'''
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
__lowercase = str(lowerCamelCase )
__lowercase = """""".join(sorted(lowerCamelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( lowerCamelCase = 99 ):
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""" )
__lowercase = 0
__lowercase = 1
while True:
if check_bouncy(lowerCamelCase ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
| 721
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
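        # replicate() broadcasts the parameters to every device and shard()
        # splits the inputs across them, so the jitted pipeline runs
        # data-parallel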
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _a ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase = """stabilityai/stable-diffusion-2"""
__lowercase , __lowercase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
__lowercase , __lowercase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
__lowercase = scheduler_params
__lowercase = """A painting of a squirrel eating a burger"""
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = sd_pipe.prepare_inputs(_lowerCAmelCase )
__lowercase = replicate(_lowerCAmelCase )
__lowercase = shard(_lowerCAmelCase )
__lowercase = jax.random.PRNGKey(0 )
__lowercase = jax.random.split(_lowerCAmelCase , jax.device_count() )
__lowercase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__lowercase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__lowercase = images[0, 253:256, 253:256, -1]
__lowercase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 53
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {"""vocab_file""": """vocab.json"""}
__UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
__UpperCamelCase : List[Any] = {"""mgp-str""": 27}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = VOCAB_FILES_NAMES
__snake_case :Dict = PRETRAINED_VOCAB_FILES_MAP
__snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , _lowerCAmelCase : Any , _lowerCAmelCase : int="[GO]" , _lowerCAmelCase : Optional[int]="[GO]" , _lowerCAmelCase : List[str]="[s]" , _lowerCAmelCase : Tuple="[GO]" , **_lowerCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
super().__init__(
unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , **_lowerCAmelCase , )
with open(_lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
__lowercase = json.load(_lowerCAmelCase )
__lowercase = {v: k for k, v in self.vocab.items()}
@property
def _a ( self : int ) -> int:
"""simple docstring"""
return len(self.vocab )
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def _a ( self : Any , _lowerCAmelCase : str ) -> Any:
"""simple docstring"""
__lowercase = []
for s in text:
char_tokens.extend(_lowerCAmelCase )
return char_tokens
def _a ( self : Any , _lowerCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
return self.vocab.get(_lowerCAmelCase , self.vocab.get(self.unk_token ) )
def _a ( self : Any , _lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return self.decoder.get(_lowerCAmelCase )
def _a ( self : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_lowerCAmelCase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(_lowerCAmelCase ) )
return
__lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + """\n""" )
return (vocab_file,)
| 700
|
import heapq
import sys
import numpy as np
__UpperCamelCase : List[str] = tuple[int, int]
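# Min-heap priority queue that also tracks membership, allowing in-place
# priority updates and removals by rebuilding the heap around the target item.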
class __UpperCamelCase :
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = set()
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return len(self.elements ) == 0
def _a ( self : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_lowerCAmelCase )
else:
# update
# print("update", item)
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self : List[str] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
if item in self.set:
self.set.remove(_lowerCAmelCase )
__lowercase = []
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self : Any ) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
((__lowercase) , (__lowercase)) = heapq.heappop(self.elements )
self.set.remove(_lowerCAmelCase )
return (priority, item)
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.array(lowerCamelCase )
__lowercase = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
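    # weighted-A* evaluation: f(s) = g(s) + Wa * h_i(s, goal)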
__lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__lowercase = """*"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__lowercase = """#"""
__lowercase = """-"""
__lowercase = back_pointer[goal]
while x != start:
((__lowercase) , (__lowercase)) = x
# print(x)
__lowercase = """-"""
__lowercase = back_pointer[x]
__lowercase = """-"""
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
__lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=""" """ )
__lowercase = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def snake_case ( lowerCamelCase ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
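    # remove s from every search's open list, then relax its four grid
    # neighbours; improved nodes re-enter the anchor queue and any
    # inadmissible queue whose key stays within Wa of the anchor key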
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__lowercase) , (__lowercase)) = s
__lowercase = (x - 1, y)
__lowercase = (x + 1, y)
__lowercase = (x, y + 1)
__lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__lowercase = -1
__lowercase = float("""inf""" )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowercase = g_function[s] + 1
__lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def snake_case ( ):
'''simple docstring'''
__lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCamelCase : Optional[int] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCamelCase : Optional[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCamelCase : Optional[Any] = make_common_ground()
__UpperCamelCase : Dict = blocks_blk
# hyper parameters
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Union[str, Any] = 1
__UpperCamelCase : Optional[int] = 20
__UpperCamelCase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCamelCase : str = (0, 0)
__UpperCamelCase : str = (n - 1, n - 1)
__UpperCamelCase : Optional[Any] = 1
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
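    # Multi-Heuristic A* (MHA*): one anchor search using the consistent
    # heuristic plus n_heuristic - 1 inadmissible searches; an inadmissible
    # queue expands next whenever its best key is within Wa of the anchor's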
__lowercase = {start: 0, goal: float("""inf""" )}
__lowercase = {start: -1, goal: -1}
__lowercase = []
__lowercase = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__lowercase = []
__lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase , __lowercase = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__lowercase = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 53
| 0
|
import torch
def snake_case ( ):
'''simple docstring'''
if torch.cuda.is_available():
__lowercase = torch.cuda.device_count()
else:
__lowercase = 0
print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 701
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def snake_case ( lowerCamelCase ):
'''simple docstring'''
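    # Swin-Tiny backbone plus a dataset-specific id2label mapping, selected by
    # substring-matching the model name against the known datasets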
__lowercase = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
__lowercase = MaskFormerConfig(backbone_config=lowerCamelCase )
__lowercase = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
__lowercase = 847
__lowercase = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
__lowercase = 150
__lowercase = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
__lowercase = 171
__lowercase = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
__lowercase = 133
__lowercase = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
__lowercase = 19
__lowercase = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
__lowercase = 65
__lowercase = """mapillary-vistas-id2label.json"""
__lowercase = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__lowercase = {int(lowerCamelCase ): v for k, v in idalabel.items()}
return config
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = dct.pop(lowerCamelCase )
__lowercase = val
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
__lowercase = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
__lowercase = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[: hidden_size, :]
__lowercase = in_proj_bias[:config.hidden_size]
__lowercase = in_proj_weight[hidden_size : hidden_size * 2, :]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size :, :]
__lowercase = in_proj_bias[-hidden_size :]
# fmt: on
def snake_case ( ):
'''simple docstring'''
__lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def snake_case ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
__lowercase = get_maskformer_config(lowerCamelCase )
# load original state_dict
with open(lowerCamelCase , """rb""" ) as f:
__lowercase = pickle.load(lowerCamelCase )
__lowercase = data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
__lowercase = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
read_in_swin_q_k_v(lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(lowerCamelCase , lowerCamelCase )
# update to torch tensors
for key, value in state_dict.items():
__lowercase = torch.from_numpy(lowerCamelCase )
# load 🤗 model
__lowercase = MaskFormerForInstanceSegmentation(lowerCamelCase )
model.eval()
for name, param in model.named_parameters():
print(lowerCamelCase , param.shape )
__lowercase , __lowercase = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(lowerCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
__lowercase = prepare_img()
if "vistas" in model_name:
__lowercase = 65
elif "cityscapes" in model_name:
__lowercase = 65_535
else:
__lowercase = 255
__lowercase = True if """ade""" in model_name else False
__lowercase = MaskFormerImageProcessor(ignore_index=lowerCamelCase , reduce_labels=lowerCamelCase )
__lowercase = image_processor(lowerCamelCase , return_tensors="""pt""" )
__lowercase = model(**lowerCamelCase )
print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__lowercase = torch.tensor(
[[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 53
| 0
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Tuple=8 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : List[Any]=99 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : Optional[Any]=36 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : List[str]=0.0 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=512 , _lowerCAmelCase : Optional[Any]=16 , _lowerCAmelCase : Optional[Any]=2 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : str=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : Optional[int]=None , ) -> Dict:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Any ) -> int:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = 300
return config
def _a ( self : List[str] ) -> Dict:
"""simple docstring"""
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    # Method and variable names below are restored from the call sites in the test
    # class further down; torch_device is expected to come from transformers.testing_utils.
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")

        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
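

# Hedged usage sketch (an illustrative addition, not part of the test suite):
# the integration tests above imply this minimal inference pattern for the
# 512-token MRA checkpoint. Guarded so it only runs when executed directly.
if __name__ == "__main__":
    model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
    input_ids = torch.arange(256).unsqueeze(0)
    with torch.no_grad():
        hidden_states = model(input_ids)[0]
    print(hidden_states.shape)  # expected: torch.Size([1, 256, 768]) per the test above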
| 702
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
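    # Hedged sanity checks (illustrative additions, not part of the original
    # solution): the 6k +/- 1 trial division above should recover the first
    # primes in order, and the 6th prime is 13.
    assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert solution(6) == 13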
| 53
| 0
|
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(str_: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(str_)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(str_: str) -> str:
    return to_simple_case(str_)


def to_camel_case(str_: str) -> str:
    try:
        res_str = to_simple_case(str_)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(str_: str, upper: bool) -> str:
    return to_complex_case(str_, upper, "_")


def to_kebab_case(str_: str, upper: bool) -> str:
    return to_complex_case(str_, upper, "-")
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 703
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
    def test_vision_text_dual_encoder_model(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
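

# Hedged usage sketch (an illustrative addition mirroring the tiny-model
# fixtures above): composing a dual encoder from independently pretrained
# vision and text checkpoints, then embedding a small random batch.
if __name__ == "__main__":
    model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
    )
    pixel_values = floats_tensor(
        [
            2,
            model.vision_model.config.num_channels,
            model.vision_model.config.image_size,
            model.vision_model.config.image_size,
        ]
    )
    input_ids = ids_tensor([2, 4], model.text_model.config.vocab_size)
    outputs = model(input_ids=input_ids, pixel_values=pixel_values)
    print(outputs.text_embeds.shape, outputs.image_embeds.shape)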
| 53
| 0
|
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
__UpperCamelCase : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
__UpperCamelCase : List[str] = [int(item) for item in user_input.split(""",""")]
__UpperCamelCase : Dict = int(input("""Enter the number to be searched:\n"""))
__UpperCamelCase : List[Any] = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'''Number {x} is at index {res}''')
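    # Hedged cross-check (an illustrative addition): on a sorted list, jump
    # search should agree with the stdlib bisect module for every member.
    import bisect

    sample = [1, 3, 5, 7, 9, 11]
    for target in sample:
        assert jump_search(sample, target) == bisect.bisect_left(sample, target)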
| 704
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
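    # Hedged numeric check (an illustrative addition): for the degree-1 curve
    # through (1, 2) and (3, 5), t = 0.5 must land on the midpoint, and the
    # Bernstein basis always sums to 1.
    line = BezierCurve([(1, 2), (3, 5)])
    assert line.bezier_curve_function(0.5) == (2.0, 3.5)
    assert round(sum(line.basis_function(0.5)), 5) == 1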
| 53
| 0
|
from manim import *
class __UpperCamelCase ( _lowerCAmelCase ):
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = Rectangle(height=0.5 , width=0.5 )
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""CPU""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
__lowercase = [mem.copy() for i in range(1 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""GPU""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase , _lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
__lowercase = Text("""Model""" , font_size=24 )
__lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) , Create(_lowerCAmelCase , run_time=1 ) , )
__lowercase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=2.5 ) , Write(_lowerCAmelCase ) , Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(_lowerCAmelCase ):
__lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
__lowercase = 0.46 / 4
__lowercase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowerCAmelCase , buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase , run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
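

# Rendering note (assumed invocation, not part of the scene code): a manim
# scene like the one above is typically rendered from the command line, e.g.
#   manim -pql this_file.py <SceneClassName>
# where -p previews the result and -ql selects low quality for fast iteration.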
| 705
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : int = 13 , _lowerCAmelCase : int = 64 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 3 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , _lowerCAmelCase : int = 128 , _lowerCAmelCase : Optional[int]=[16, 32, 64, 128] , _lowerCAmelCase : int = 7 , _lowerCAmelCase : int = 4 , _lowerCAmelCase : int = 37 , _lowerCAmelCase : str = "gelu" , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : int = 10 , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 128 , _lowerCAmelCase : List[int] = [2, 2, 2, 2] , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 2 , ) -> Tuple:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = encoder_stride
__lowercase = num_attention_outputs
__lowercase = embed_dim
__lowercase = embed_dim + 1
__lowercase = resolution
__lowercase = depths
__lowercase = hidden_sizes
__lowercase = dim
__lowercase = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def test_model_common_attributes(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.return_dict = True
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
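

# Hedged end-to-end sketch (an illustrative addition; the checkpoint name
# comes from the slow integration tests above, everything else is assumed):
if __name__ == "__main__":
    image_processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
    model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
    inputs = image_processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs, training=False).logits
    print(int(tf.math.argmax(logits, axis=-1)[0]))  # predicted ImageNet class id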
| 53
| 0
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
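

# Hedged sketch (an illustrative addition condensing the denoising-loop tests
# above): the core UnCLIP sampling pattern is "predict noise, then
# scheduler.step". The zero tensor stands in for a real denoising model.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps[:3]:  # a few steps only, for illustration
        residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    print(sample.shape)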
| 706
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with '<symbol> <count>' per line"""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__UpperCamelCase : Optional[Any] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
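
# Example invocation (paths are placeholders; the script filename is an
# assumption about how this converter is saved to disk):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/fairseq_dump \
#       --pytorch_dump_folder_path /path/to/hf_output
# The dump directory must already contain checkpoint.pt, dict.txt and
# bpecodes, as validated at the top of convert_biogpt_checkpoint_to_pytorch.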
| 53
| 0