"""simple docstring"""
def lowercase__ ( lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[Any] = ""
for i in table:
res += inp[i - 1]
return res
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
return data[1:] + data[0]
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
"""simple docstring"""
_UpperCamelCase : Dict = ""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowercase__ ( lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = int("0b" + data[0] + data[-1] ,2 )
_UpperCamelCase : Tuple = int("0b" + data[1:3] ,2 )
return bin(s[row][col] )[2:]
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase : Any = message[:4]
_UpperCamelCase : str = message[4:]
_UpperCamelCase : List[Any] = apply_table(lowercase_ ,lowercase_ )
_UpperCamelCase : List[Any] = xor(lowercase_ ,lowercase_ )
_UpperCamelCase : Tuple = apply_sbox(lowercase_ ,temp[:4] ) # noqa: E741
_UpperCamelCase : Tuple = apply_sbox(lowercase_ ,temp[4:] )
_UpperCamelCase : List[Any] = "0" * (2 - len(lowercase_ )) + l # noqa: E741
_UpperCamelCase : Optional[int] = "0" * (2 - len(lowercase_ )) + r
_UpperCamelCase : Any = apply_table(l + r ,lowercase_ )
_UpperCamelCase : str = xor(lowercase_ ,lowercase_ )
return temp + right
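# A minimal helper sketch (not part of the original script): the two Feistel
# rounds compose into a reusable encrypt step, and decryption is the same call
# with the subkeys swapped. Note that `function` reads `p4_table` as a
# module-level global, exactly as in the __main__ block that follows.
def encrypt(message, key1, key2, ip, ip_inv, expansion, s0, s1):
    temp = apply_table(message, ip)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between the two rounds
    temp = function(expansion, s0, s1, key2, temp)
    return apply_table(temp, ip_inv)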
if __name__ == "__main__":
lowerCamelCase__ = input("Enter 10 bit key: ")
lowerCamelCase__ = input("Enter 8 bit message: ")
lowerCamelCase__ = [6, 3, 7, 4, 8, 5, 10, 9]
lowerCamelCase__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCamelCase__ = [2, 4, 3, 1]
lowerCamelCase__ = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCamelCase__ = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCamelCase__ = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCamelCase__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCamelCase__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCamelCase__ = apply_table(key, paa_table)
lowerCamelCase__ = temp[:5]
lowerCamelCase__ = temp[5:]
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = apply_table(left + right, pa_table)
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = apply_table(left + right, pa_table)
# encryption
lowerCamelCase__ = apply_table(message, IP)
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = temp[4:] + temp[:4]
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
lowerCamelCase__ = apply_table(CT, IP)
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = temp[4:] + temp[:4]
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
lowerCamelCase__ = "src/transformers"
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase__ = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase__ = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowerCamelCase__ = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase__ = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase__ = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase__ = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase__ = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowerCamelCase__ = re.compile(R"^\s*try:")
# Catches a line with else:
lowerCamelCase__ = re.compile(R"^\s*else:")
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if _re_test_backend.search(lowercase_ ) is None:
return None
_UpperCamelCase : Optional[Any] = [b[0] for b in _re_backend.findall(lowercase_ )]
backends.sort()
return "_and_".join(lowercase_ )
def parse_init(init_file):
    """Read an init file and parse, per backend, the `_import_structure` objects and the
    `TYPE_CHECKING` objects it defines."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the `_import_structure` objects and the `TYPE_CHECKING`
    objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same
    objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that all submodules of Transformers are properly registered in the main init."""
    # This makes sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
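# Typical invocation (an assumption; the exact entry point depends on the
# repo's tooling, but the relative PATH_TO_TRANSFORMERS above implies running
# from the repository root):
#
#   python utils/check_inits.py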
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Given any two of voltage, current, and power (pass the unknown one as 0),
    compute the third from the relation P = V * I.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
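# Example usage (the values follow directly from P = V * I):
#
#   >>> electric_power(voltage=0, current=2, power=5)
#   result(name='voltage', value=2.5)
#   >>> electric_power(voltage=2, current=2, power=0)
#   result(name='power', value=4.0)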
from sklearn.metrics import mean_squared_error

import datasets

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the square of the difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows:

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    """Shorten an image to a 10-character md5 digest of its raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
                {"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9967},
                {"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
                {"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9909},
                {"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9879},
                {"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9834},
                {"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9716},
                {"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9612},
                {"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9599},
                {"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9552},
                {"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9532},
                {"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9516},
                {"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9499},
                {"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9483},
                {"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9464},
                {"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
                {"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9408},
                {"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9335},
                {"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9326},
                {"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9262},
                {"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8999},
                {"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8986},
                {"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8984},
                {"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8873},
                {"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8871},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444},
                {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210},
                {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167},
                {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132},
                {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053},
            ],
        )
from sklearn.metrics import recall_score

import datasets

_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""

_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `'warn'`.
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.

Examples:

    Example 1-A simple example with some errors
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
        >>> print(results)
        {'recall': 0.6666666666666666}

    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
        >>> recall_metric = datasets.load_metric('recall')
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
        >>> print(results)
        {'recall': 0.5}

    Example 3-The same example as Example 1, but with `sample_weight` included.
        >>> recall_metric = datasets.load_metric('recall')
        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
        >>> print(results)
        {'recall': 0.55}

    Example 4-A multiclass example, using different averages.
        >>> recall_metric = datasets.load_metric('recall')
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
        >>> print(results)
        {'recall': 0.3333333333333333}
        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'recall': array([1., 0., 0.])}
"""

_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[str] = model_class(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase__ )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure that inference works in half precision without any problem."""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    sample (`torch.FloatTensor` of shape `(batch_size * num_frames, num_channels, height, width)`):
        Hidden states conditioned on the `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A Transformer model applied over the frame (time) dimension of video-like data."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch and normalize per (batch, channel)
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks: attention runs over the num_frames axis for each spatial location
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: restore the (batch * frames, channel, height, width) layout and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
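# Minimal smoke-test sketch (illustrative, not part of the module): the forward
# pass expects frames flattened into the batch dimension, i.e. hidden states of
# shape (batch_size * num_frames, in_channels, height, width), and returns a
# tensor of the same shape.
#
#   model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=8, in_channels=64)
#   sample = torch.randn(2 * 4, 64, 16, 16)  # batch_size=2, num_frames=4
#   out = model(sample, num_frames=4).sample  # (8, 64, 16, 16)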
import torch


def main():
    """Print how many CUDA GPUs are visible to PyTorch."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowercase )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowercase )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowercase )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowercase )
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowercase )
@slow
def snake_case__ ( self : str ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DebertaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
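# A brief, hedged inference sketch for the model family tested above (not part of
# the test suite; assumes network access to the "microsoft/deberta-base" checkpoint).
from transformers import AutoModel, AutoTokenizer

deberta_tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
deberta_model = AutoModel.from_pretrained("microsoft/deberta-base")
encoded = deberta_tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    hidden_states = deberta_model(**encoded).last_hidden_state
print(hidden_states.shape)  # (batch, seq_len, hidden_size)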
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
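# A small, hedged sketch (assumes a torch-enabled `transformers` install) of what
# the lazy module above buys you: importing the config is cheap, and the heavy
# modeling code is only materialized when one of its attributes is first touched.
from transformers.models.autoformer import AutoformerConfig

config = AutoformerConfig()
print(config.model_type)  # "autoformer"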
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import re
import string
import numpy as np
import datasets
_DESCRIPTION = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n    >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results[\"exact_match\"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric(\"exact_match\")\n    >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n    >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results[\"exact_match\"], 1))\n    33.3\n\n"
_CITATION = "\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
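# A minimal usage sketch for the metric above, mirroring its docstring examples
# (assumes a `datasets` version that still exposes `load_metric`).
from datasets import load_metric

exact_match = load_metric("exact_match")
refs = ["the cat", "theater", "YELLING", "agent007"]
preds = ["cat?", "theater", "yelling", "agent"]
results = exact_match.compute(references=refs, predictions=preds)
print(round(results["exact_match"], 1))  # 25.0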
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
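# A short, hedged sketch of the `truncate_before_pattern` behaviour exercised in
# `test_truncation` above (assumes network access to the
# "Salesforce/codegen-350M-mono" checkpoint).
completion_tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = completion_tokenizer.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing noise")
# Cut the decoded text at the first run of blank lines or top-level comment:
print(completion_tokenizer.decode(ids, truncate_before_pattern=["\n\n\n", "^#"]))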
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json",
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
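# Minimal sketch: instantiating the config defined above with one override and
# reading back a few fields (no model weights involved).
fnet_config = FNetConfig(num_hidden_layers=6)
print(fnet_config.model_type, fnet_config.num_hidden_layers, fnet_config.hidden_act)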
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
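# Sanity-check sketch for the pure-Python MD5 above: its digest should agree
# with the reference implementation in hashlib.
import hashlib

message = b"hello world"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")
print(md5_me(message))  # b'5eb63bbbe01eeed093cb22bb8f5acdc3'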
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
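# A hedged usage sketch: these dataclass fields are normally filled from the
# command line via transformers' HfArgumentParser (the flag names below simply
# mirror the field names above).
from transformers import HfArgumentParser

parser = HfArgumentParser(BenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses(
    args=["--models", "bert-base-cased", "--batch_sizes", "1", "8"]
)[0]
print(benchmark_args.model_names, benchmark_args.batch_sizes)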
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
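# Hedged usage sketch: checking a single __init__.py by hand instead of walking
# the whole tree (assumes the file follows the `_import_structure` layout this
# script targets and exists relative to the current working directory).
needs_sorting = sort_imports("src/diffusers/__init__.py", check_only=True)
print("imports out of order" if needs_sorting else "imports already sorted")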
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total count for quantum fourier transform state is: \n{quantum_fourier_transform(3)}")
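# Hedged sketch: applied to |0...0>, the QFT above produces a uniform
# superposition, so the measured counts should spread roughly evenly over all
# 2**n bitstrings (assumes qiskit with the legacy Aer/execute API used here).
counts = quantum_fourier_transform(2)
assert sum(counts.values()) == 10_000 and len(counts) <= 4
print(counts)  # e.g. {'00': ~2500, '01': ~2500, '10': ~2500, '11': ~2500}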
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
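# Minimal sketch: the command can also be driven programmatically, without the
# `diffusers-cli env` wrapper (assumes a working diffusers install).
env_info = EnvironmentCommand().run()
print(sorted(env_info.keys()))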
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    # The pipeline is only deterministic enough for a tight check on CPU/MPS.
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        # (sic: the prompt spelling matches the recorded reference image)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
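# A hedged end-to-end sketch of the pipeline under test (assumptions: a CUDA
# GPU and network access to "stabilityai/stable-diffusion-2-1-unclip", one
# publicly released StableUnCLIP img2img weight set).
pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, "a watercolor painting of a turtle").images[0]
image.save("stable_unclip_variation.png")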
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 591
| 1
|
from __future__ import annotations
import math
def minimax( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main( ):
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
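A quick sanity check for the minimax routine above (a minimal sketch; the four-leaf tree and the hand-computed value are illustrative, not from the source):

# leaves [3, 5, 2, 9] at depth 2: max(min(3, 5), min(2, 9)) = max(3, 2) = 3
assert minimax(0 , 0 , True , [3, 5, 2, 9] , math.log(4 , 2 ) ) == 3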
| 703
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast( BertTokenizerFast ):
    """simple docstring"""
    slow_tokenizer_class = CustomTokenizer
    pass
| 112
| 0
|
'''simple docstring'''
from math import log2
def A_ ( a : int ):
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif not isinstance(a , int ):
        raise TypeError('Input value must be a \'int\' type' )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
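A small worked example for the function above (illustrative values, assuming the corrected log2 import):

# 36 is 0b100100: the lowest set bit is at index 2, i.e. 36 & -36 == 4 == 2**2
assert 36 & -36 == 4
assert A_(36 ) == 2
assert A_(0 ) == 0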
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
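For reference, a minimal standalone sketch of the lazy-import pattern used above (an illustration of the idea, not the transformers `_LazyModule` implementation):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    # defers importing a submodule until one of its exported names is accessed
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
    def __getattr__(self, attr):
        if attr not in self._symbol_to_submodule:
            raise AttributeError(attr)
        submodule = importlib.import_module("." + self._symbol_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)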
| 309
| 1
|
"""simple docstring"""
def solution( numerator : int = 1 , digit : int = 10_00 ):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
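A small-scale check of the routine above: 1/7 = 0.(142857) has the longest recurring decimal cycle among denominators below 10, so the expected result is 7 (an illustrative assertion, worked out by hand):

assert solution(1 , 10 ) == 7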
| 703
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ) -> List[str]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ) -> Union[str, Any]:
'''simple docstring'''
def _flatten(__UpperCamelCase ):
return list(itertools.chain(*__UpperCamelCase ) )
if equal_length:
lowercase_ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase_ : int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
lowercase_ : int = [np.asarray(__UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
    def test_feat_extract_from_and_save_pretrained( self ) -> str:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_feat_extract_to_json_file( self ) -> Tuple:
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1 , mel_2 ) )
        self.assertEqual(dict_first , dict_second )
    def test_call( self ) -> List[Any]:
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs , padding='max_length' , return_tensors='np' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1e-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
        # Test truncation required
        speech_inputs = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input ) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs , return_tensors='np' ).input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated , return_tensors='np' ).input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1e-3 ) )
    def test_double_precision_pad( self ) -> List[Any]:
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self , num_samples ) -> Tuple:
        '''simple docstring'''
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration( self ) -> int:
        '''simple docstring'''
        EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ) -> Optional[int]:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
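The zero-mean/unit-variance normalization exercised by the last test, in isolation (a hedged numpy sketch of the same arithmetic, not the feature extractor's exact code):

audio = np.random.rand(4000 ).astype(np.float32 )
normed = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7 )
print(abs(normed.mean() ) < 1e-3 , abs(normed.var() - 1 ) < 1e-3 )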
| 477
| 0
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph: dict , v: str , visited_forward: set , visited_backward: set , cst_fwd: dict , cst_bwd: dict , queue: PriorityQueue , parent: dict , shortest_distance: float | int , ) -> float | int:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source: str , destination: str , graph_forward: dict , graph_backward: dict ) -> int:
    '''simple docstring'''
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
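Queried on the sample graphs, the expected distance can be worked out by hand: E -> G -> F costs 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4 (illustrative check):

print(bidirectional_dij("E" , "F" , graph_fwd , graph_bwd ) )  # 3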
| 514
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 514
| 1
|
def miller_rabin( n , allow_probable = False ):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime." )
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime, d * 2**r, n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
def test_miller_rabin( ):
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
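A brute-force cross-check of the deterministic test on a small range (illustrative only; trial division is the reference here):

def is_prime_naive(k ):
    return k > 1 and all(k % p for p in range(2 , int(k**0.5 ) + 1 ) )
assert all(miller_rabin(k ) == is_prime_naive(k ) for k in range(2 , 1000 ) )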
| 711
|
def least_divisible_repunit( divisor ):
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution( limit = 1000000 ):
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
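A hand-checkable case for the helper above: R(6) = 111111 = 7 × 15873, and no shorter repunit is divisible by 7, so A(7) = 6 (illustrative assertions):

assert least_divisible_repunit(7 ) == 6
assert least_divisible_repunit(2 ) == 0  # even divisors never divide a repunit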
| 453
| 0
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ) -> Any:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self, **kwargs ) -> Optional[int]:
        """simple docstring"""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs )
    def get_input_output_texts( self, tokenizer ) -> List[Any]:
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> str:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ), [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
| 253
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput( BaseOutput ):
    '''simple docstring'''
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected : Optional[List[bool]]
    watermark_detected : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 253
| 1
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap( checkpoint_path , enable_fusion=False ):
    clap_model, clap_model_cfg = create_model(
        "HTSAT-tiny" , "roberta" , checkpoint_path , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=enable_fusion , fusion_type="aff_2d" if enable_fusion else None , )
    return clap_model, clap_model_cfg
def rename_state_dict( state_dict ):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(sequential_layer )//3}.linear.""" )
        elif re.match(text_projection_pattern , key ):
            projecton_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv" , "query" )] = query_layer
            model_state_dict[key.replace("qkv" , "key" )] = key_layer
            model_state_dict[key.replace("qkv" , "value" )] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ):
    clap_model, clap_model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
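To make the regex rules concrete, here is how the key-renaming logic rewrites a couple of sample keys (the key names are made up for illustration, not real CLAP checkpoint keys):

sample = {
    "text_branch.sequential.3.weight": torch.zeros(2 , 2 ),
    "text_transform._projection.0.weight": torch.zeros(2 , 2 ),
}
print(sorted(rename_state_dict(sample ) ) )
# ['text_model.layers.1.linear.weight', 'text_transform._projection.linear1.weight']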
| 705
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
lowerCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = create_model(
"HTSAT-tiny" , "roberta" , lowerCAmelCase__ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=lowerCAmelCase__ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = r".*sequential.(\d+).*"
UpperCAmelCase_ = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase_ = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
# replace sequential layers with list
UpperCAmelCase_ = re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 )
UpperCAmelCase_ = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(lowerCAmelCase__ )//3}.linear.""" )
elif re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase_ = 1 if projecton_layer == 0 else 2
UpperCAmelCase_ = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase_ = value
UpperCAmelCase_ = mixed_qkv.size(0 ) // 3
UpperCAmelCase_ = mixed_qkv[:qkv_dim]
UpperCAmelCase_ = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase_ = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase_ = query_layer
UpperCAmelCase_ = key_layer
UpperCAmelCase_ = value_layer
else:
UpperCAmelCase_ = value
return model_state_dict
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
UpperCAmelCase_ , UpperCAmelCase_ = init_clap(lowerCAmelCase__ , enable_fusion=lowerCAmelCase__ )
clap_model.eval()
UpperCAmelCase_ = clap_model.state_dict()
UpperCAmelCase_ = rename_state_dict(lowerCAmelCase__ )
UpperCAmelCase_ = ClapConfig()
UpperCAmelCase_ = enable_fusion
UpperCAmelCase_ = ClapModel(lowerCAmelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
transformers_config.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 14
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
A_ : Dict = KandinskyImgaImgPipeline
A_ : Tuple = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
A_ : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
A_ : List[Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ : Dict = False
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
return 32
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
return 32
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
return self.time_input_dim
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : int ) -> Optional[Any]:
return 100
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
A = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
A = MultilingualCLIP(lowerCAmelCase_ )
A = text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[str] ) -> Tuple:
torch.manual_seed(0 )
A = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
A = UNetaDConditionModel(**lowerCAmelCase_ )
return model
@property
def __UpperCamelCase ( self : Optional[Any] ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : Any ) -> str:
torch.manual_seed(0 )
A = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : int ) -> str:
A = self.dummy_text_encoder
A = self.dummy_tokenizer
A = self.dummy_unet
A = self.dummy_movq
A = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
A = DDIMScheduler(**lowerCAmelCase_ )
A = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any]=0 ) -> List[str]:
A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase_ )
# create init_image
A = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' ).resize((256, 256) )
if str(lowerCAmelCase_ ).startswith('mps' ):
A = torch.manual_seed(lowerCAmelCase_ )
else:
A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
A = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
A = 'cpu'
A = self.get_dummy_components()
A = self.pipeline_class(**lowerCAmelCase_ )
A = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) )
A = output.images
A = pipe(
**self.get_dummy_inputs(lowerCAmelCase_ ) , return_dict=lowerCAmelCase_ , )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
A = 'A red cartoon frog, 4k'
A = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowerCAmelCase_ )
A = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
A = pipeline.to(lowerCAmelCase_ )
pipeline.set_progress_bar_config(disable=lowerCAmelCase_ )
A = torch.Generator(device='cpu' ).manual_seed(0 )
A , A = pipe_prior(
lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
A = pipeline(
lowerCAmelCase_ , image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
| 106
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling (data : dict ):
    '''simple docstring'''
    return (data["data"], data["target"])
def xgboost (features : np.ndarray , target : np.ndarray , test_features : np.ndarray ):
    '''simple docstring'''
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main ():
    '''simple docstring'''
    data = fetch_california_housing()
    features , target = data_handling(data )
    x_train , x_test , y_train , y_test = train_test_split(
        features , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test , predictions )}' )
    print(f'Mean Square Error : {mean_squared_error(y_test , predictions )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
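The same pipeline on synthetic data, which avoids the California-housing download (a self-contained sketch; the coefficients and sizes are arbitrary):

rng = np.random.default_rng(0 )
x = rng.normal(size=(200, 3) )
y = x @ np.array([1.0, -2.0, 0.5] ) + rng.normal(scale=0.1 , size=200 )
x_tr , x_te , y_tr , y_te = train_test_split(x , y , test_size=0.25 , random_state=1 )
print(mean_absolute_error(y_te , xgboost(x_tr , y_tr , x_te ) ) )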
| 22
| 0
|
'''simple docstring'''
from ....utils import logging
lowercase : List[str] = logging.get_logger(__name__)
class MMBTConfig:
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        """simple docstring"""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 542
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 542
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__( self , prompt : str = None , choices : list = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = """*"""
        else:
            self.arrow_char = """➔ """
    def write_choice( self , index , end : str = "" ):
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , end )
        else:
            forceWrite(self.choices[index] , end )
    def print_choice( self , index : int ):
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(index )
        else:
            forceWrite(F'    {self.choices[index]}' )
        reset_cursor()
    def move_direction( self , direction : Direction , num_spaces : int = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP["""up"""] )
    def move_up( self ):
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP["""down"""] )
    def move_down( self ):
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP["""newline"""] )
    def select( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        return self.position
    @input.mark(KEYMAP["""interrupt"""] )
    def interrupt( self ):
        move_cursor(len(self.choices ) - self.position , """DOWN""" )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run( self , default_choice : int = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , """\n""" )
            if in_colab:
                forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
            else:
                forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("""\n""" )
        move_cursor(len(self.choices ) - self.position , """UP""" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , """UP""" )
                        clear_line()
                    self.write_choice(choice , """\n""" )
                    return choice
| 80
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ) -> str:
        """simple docstring"""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 80
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
SCREAMING_SNAKE_CASE__ = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''maskformer'''
__SCREAMING_SNAKE_CASE : int = {'''hidden_size''': '''mask_feature_size'''}
__SCREAMING_SNAKE_CASE : Tuple = ['''resnet''', '''swin''']
__SCREAMING_SNAKE_CASE : List[str] = ['''detr''']
def __init__( self : str , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : int = 2_5_6 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict] = None , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : float = 20.0 , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__a : Any = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Any = backbone_config.pop('model_type' )
__a : int = CONFIG_MAPPING[backbone_model_type]
__a : str = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__a : Dict = DetrConfig()
else:
# verify that the decoder is supported
__a : int = (
decoder_config.pop('model_type' ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : List[str] = CONFIG_MAPPING[decoder_type]
__a : Union[str, Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
__a : Tuple = backbone_config
__a : Tuple = decoder_config
# main feature dimension for the model
__a : Union[str, Any] = fpn_feature_size
__a : List[Any] = mask_feature_size
# initializer
__a : Dict = init_std
__a : Optional[int] = init_xavier_std
# Hungarian matcher && loss
__a : int = cross_entropy_weight
__a : Any = dice_weight
__a : List[Any] = mask_weight
__a : int = use_auxiliary_loss
__a : Any = no_object_weight
__a : str = output_auxiliary_logits
__a : Tuple = self.decoder_config.encoder_attention_heads
__a : int = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE__ : PretrainedConfig , SCREAMING_SNAKE_CASE__ : PretrainedConfig , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return cls(
backbone_config=SCREAMING_SNAKE_CASE__ , decoder_config=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
__a : Optional[int] = copy.deepcopy(self.__dict__ )
__a : Optional[Any] = self.backbone_config.to_dict()
__a : Optional[Any] = self.decoder_config.to_dict()
__a : Optional[Any] = self.__class__.model_type
return output
| 577
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def UpperCAmelCase__ ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=False ):
__a : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('module.cls_token', 'vit.embeddings.cls_token'),
('module.patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('module.patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('module.pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('module.norm.weight', 'layernorm.weight'),
('module.norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__a : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__a : Union[str, Any] = ''
else:
__a : int = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a : Optional[Any] = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
__a : Optional[Any] = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__a : Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
__a : str = in_proj_bias[: config.hidden_size]
__a : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a : int = in_proj_weight[
-config.hidden_size :, :
]
__a : Tuple = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size,
        image_mean=[0.485, 0.456, 0.406],  # assumed ImageNet defaults; the obfuscated source elided these values
        image_std=[0.229, 0.224, 0.225],
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
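# A self-contained sketch of the fused-QKV splitting that read_in_q_k_v performs
# at the top of this file (shapes are illustrative assumptions, not the real
# ViT-MSN dimensions): timm-style checkpoints store query/key/value as one
# (3*hidden, hidden) matrix, and the HF layout wants three (hidden, hidden) slices.
import torch


def _demo_split_fused_qkv(hidden_size: int = 8):
    fused_w = torch.randn(3 * hidden_size, hidden_size)
    fused_b = torch.randn(3 * hidden_size)
    q_w = fused_w[:hidden_size, :]
    k_w = fused_w[hidden_size : hidden_size * 2, :]
    v_w = fused_w[-hidden_size:, :]  # the same tail slice used above for the value projection
    v_b = fused_b[-hidden_size:]
    assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
    return q_w, k_w, v_w, v_b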
| 577
| 1
|
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, handling subtractive notation."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral using the greedy ROMAN table."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
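# Quick sanity check (an illustrative addition, not part of the original file):
# the two converters should be inverses for any value the subtractive pairs in
# ROMAN cover, i.e. 1..3999.
if __name__ == "__main__":
    for n in (3, 14, 1990, 2023, 3999):
        assert roman_to_int(int_to_roman(n)) == n
    print(int_to_roman(1994))  # MCMXCIV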
| 306
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
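# The three loops above all follow the same k-diffusion sampling contract, worth
# stating once (a hedged sketch of the pattern, not diffusers source code): the
# scheduler rescales the sample for the current sigma before the model sees it,
# and step() advances the sample by one sigma level.
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample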
| 311
| 0
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
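# A closely related sketch (assumes the same qiskit API used above): replacing
# the two X gates with a Hadamard plus CNOT prepares a Bell pair, so the counts
# split roughly 50/50 between '00' and '11' instead of being all '11'.
def bell_state_measure() -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)      # put qubit 0 into superposition
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)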
| 631
|
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False  # assumed intent of the elided module-level flag


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,  # assumed; the obfuscated source elided this flag
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,  # assumed; see above
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
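# Why the equality assertions above can hold: DDPM and DDIM share the same
# forward-diffusion map,
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# so with identical beta schedules add_noise produces identical noisy batches.
# A standalone numeric sketch (toy alphas_cumprod, not the schedulers' internals):
def _demo_add_noise():
    alphas_cumprod = torch.linspace(0.9999, 0.01, 1_000)
    x0 = torch.randn(4, 3, 32, 32)
    eps = torch.randn_like(x0)
    t = 500
    return alphas_cumprod[t].sqrt() * x0 + (1 - alphas_cumprod[t]).sqrt() * eps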
| 631
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x-axis on [x_start, x_end]."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
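# Sanity check (an illustrative addition, not in the original file): because the
# summand takes abs(fx2 + fx1), the routine measures the geometric area between
# the curve and the x-axis. For f(x) = x^3 + x^2 = x^2 (x + 1), that is the
# integral of |f| over [-5, 5], namely 344/3 + 198 = 938/3 ≈ 312.667.
if __name__ == "__main__":
    print(abs(trapezoidal_area(f, -5, 5, 100_000) - 938 / 3))  # should be tiny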
| 88
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
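# For intuition, a hedged sketch of what the lazy pattern above achieves --
# attribute access triggers the first real import. A plain module can emulate it
# with a PEP 562 module-level __getattr__ (illustrative, not the actual
# _LazyModule implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)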
| 51
| 0
|
from ..utils import DummyObject, requires_backends
# NOTE: in the upstream transformers file each dummy class below carries the name
# of a distinct sentencepiece-backed class; those names were lost in this copy,
# so the placeholder class name is kept.
class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class UpperCAmelCase_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
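# For context, a minimal sketch of how a DummyObject-style metaclass works
# (this mirrors the real transformers helper only in spirit): any attribute
# access or instantiation surfaces the missing-backend error instead of an
# ImportError at module import time.
class _DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the sentencepiece library.")


class _ExampleTokenizer(metaclass=_DummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("ExampleTokenizer requires the sentencepiece library.")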
| 382
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
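# Illustrative usage of the helpers above (hypothetical prompt text, not part of
# the real accelerate CLI flow):
#
#   use_cpu = _ask_field(
#       "Do you want to run on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )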
| 382
| 1
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt by subtracting the key letter from each message letter mod 26."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt by adding the key letter back to each cipher letter mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
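# Round-trip property worth noting (an illustrative addition): decryption adds
# back exactly what encryption subtracted mod 26, so for any upper-case message
# original_text(cipher_text(m, k), k) == m.
if __name__ == "__main__":
    _k = generate_key("THE GERMAN ATTACK", "SECRET")
    assert original_text(cipher_text("THE GERMAN ATTACK", _k), _k) == "THE GERMAN ATTACK"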
| 145
|
_a : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_a : int = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 145
| 1
|
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 98
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
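# A minimal sketch of the "*" wildcard handling inside recursively_load_weights
# (toy key names, not taken from a real checkpoint): the encoder-layer index is
# recovered from the fairseq key and spliced into the HF key template.
def _demo_wildcard_mapping():
    name = "w2v_encoder.w2v_model.encoder.layers.3.self_attn.linear_q.weight"
    key = "self_attn.linear_q"
    mapped_key = "encoder.layers.*.self_attn.linear_q"
    layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
    return mapped_key.replace("*", layer_index)  # -> "encoder.layers.3.self_attn.linear_q"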
| 98
| 1
|
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort over the global edges/vertices."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
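# Added observation: the DFS appends each vertex only after all of its
# neighbors, so `sort` comes back in reverse topological order -- here
# ['c', 'd', 'e', 'b', 'a'] -- and reversing it gives a valid topological
# ordering that starts from 'a'.
if __name__ == "__main__":
    print(list(reversed(sort)))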
| 94
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
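# Illustrative usage (a sketch; relies only on the defaults reconstructed
# above): two_stage requires with_box_refine, and configs round-trip through
# to_dict(), which save_pretrained builds on.
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   assert config.to_dict()["two_stage"] is True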
| 404
| 0
|
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
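# Example invocation via python-fire (file names here are placeholders):
#
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json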
| 717
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """Return True if the digits of num read the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
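# Quick examples (illustrative additions matching the digit-reversal logic above):
if __name__ == "__main__":
    assert is_palindrome(12321) is True
    assert is_palindrome(1232) is False
    assert is_palindrome(-121) is False  # negatives are rejected up front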
| 251
| 0
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of the two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
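# Worked example (an illustrative addition): merging [1, 3] and [2] sorts to
# [1, 2, 3], whose middle element is 2; an even-length merge averages the two
# middle values instead.
if __name__ == "__main__":
    assert median_of_two_arrays([1, 3], [2]) == 2
    assert median_of_two_arrays([1, 2], [3, 4]) == 2.5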
| 283
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 187
| 0
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class a :
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=13 , SCREAMING_SNAKE_CASE_ : Optional[int]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Optional[int]=99 , SCREAMING_SNAKE_CASE_ : Optional[int]=32 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Optional[int]=4 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : int=16 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ):
__lowerCamelCase: str = parent
__lowerCamelCase: str = batch_size
__lowerCamelCase: Tuple = seq_length
__lowerCamelCase: Tuple = is_training
__lowerCamelCase: str = use_input_mask
__lowerCamelCase: List[str] = use_token_type_ids
__lowerCamelCase: List[str] = use_labels
__lowerCamelCase: Any = vocab_size
__lowerCamelCase: Union[str, Any] = hidden_size
__lowerCamelCase: Optional[Any] = num_hidden_layers
__lowerCamelCase: Optional[Any] = num_attention_heads
__lowerCamelCase: Optional[Any] = intermediate_multiple_size
__lowerCamelCase: Any = hidden_act
__lowerCamelCase: Dict = hidden_dropout
__lowerCamelCase: Dict = attention_dropout
__lowerCamelCase: Dict = weight_tying
__lowerCamelCase: Tuple = max_position_embeddings
__lowerCamelCase: List[str] = type_vocab_size
__lowerCamelCase: Tuple = type_sequence_label_size
__lowerCamelCase: Union[str, Any] = initializer_range
__lowerCamelCase: Dict = num_labels
__lowerCamelCase: List[str] = num_choices
__lowerCamelCase: Union[str, Any] = scope
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
__lowerCamelCase: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase: Dict = None
if self.use_input_mask:
__lowerCamelCase: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase: int = None
if self.use_labels:
__lowerCamelCase: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase: Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase: int = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCamelCase: Any = GPTNeoXJapaneseModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase: List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ):
__lowerCamelCase: Optional[Any] = True
__lowerCamelCase: Optional[Any] = GPTNeoXJapaneseModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase: str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: Union[str, Any] = GPTNeoXJapaneseForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase: int = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
__lowerCamelCase: List[Any] = True
__lowerCamelCase: List[Any] = GPTNeoXJapaneseForCausalLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# first forward pass
__lowerCamelCase: Dict = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase: List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase: Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase: Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase: Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase: Any = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = output_from_no_past["""hidden_states"""][0]
__lowerCamelCase: Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )["""hidden_states"""][0]
# select random slice
__lowerCamelCase: List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase: Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: List[Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: List[str] = config_and_inputs
__lowerCamelCase: List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Any = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
__lowerCamelCase: Optional[int] = GPTNeoXJapaneseModelTester(self )
__lowerCamelCase: Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# This regression test was failing with PyTorch < 1.3
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: str = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowerCamelCase: Union[str, Any] = None
self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
__lowerCamelCase: int = """abeja/gpt-neox-japanese-2.7b"""
__lowerCamelCase: int = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
__lowerCamelCase: Optional[Any] = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
__lowerCamelCase: List[Any] = GPTNeoXJapaneseTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = []
for prompt in prompts:
__lowerCamelCase: Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).input_ids
__lowerCamelCase: Any = model.generate(SCREAMING_SNAKE_CASE_ , max_length=50 )
__lowerCamelCase: Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
predicted_outputs += generated_string
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
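# A standalone sketch (not part of the test class above) of the generation path
# the slow test exercises: load the public "abeja/gpt-neox-japanese-2.7b"
# checkpoint, tokenize a prompt, and greedily generate up to 50 tokens. The
# helper name is hypothetical; the checkpoint and max_length come from the test.
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

def generate_japanese_completion(prompt: str = "データサイエンティストとは、") -> str:
    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_length=50)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]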
| 189
|
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match(self, word: str):
        """Return the common prefix, the remaining node prefix and the remaining word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater than or equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str):
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
    main()
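# A short usage sketch for the RadixNode API exercised above: build a tree,
# query membership, delete a word, and pretty-print the structure.
def usage_example() -> None:
    root = RadixNode()
    root.insert_many(["banana", "bananas", "band"])
    print(root.find("banana"))   # True
    print(root.find("ban"))      # False: "ban" is only an internal prefix
    root.delete("banana")
    print(root.find("bananas"))  # True: deleting "banana" keeps "bananas"
    root.print_tree()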
| 189
| 1
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowerCAmelCase = logging.getLogger(__name__)
class _a ( UpperCamelCase__ ):
def lowerCamelCase_ ( self: str , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Dict=None ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] )
lowercase__ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , UpperCamelCase__ , )
class _a ( UpperCamelCase__ ):
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(UpperCamelCase_ )
lowercase__ = BertEncoderWithPabee(UpperCamelCase_ )
self.init_weights()
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: str ) -> Tuple:
"""simple docstring"""
lowercase__ = threshold
def lowerCamelCase_ ( self: int , UpperCamelCase_: Dict ) -> Any:
"""simple docstring"""
lowercase__ = patience
def lowerCamelCase_ ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = 0
lowercase__ = 0
def lowerCamelCase_ ( self: List[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.inference_layers_num / self.inference_instances_num
lowercase__ = (
f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: str=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Tuple=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: str=None , UpperCamelCase_: str=False , ) -> List[Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
lowercase__ = input_ids.size()
elif inputs_embeds is not None:
lowercase__ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
lowercase__ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
lowercase__ = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
lowercase__ = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
lowercase__ = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
lowercase__ , lowercase__ , lowercase__ = encoder_hidden_states.size()
lowercase__ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
lowercase__ = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
lowercase__ = self.invert_attention_mask(UpperCamelCase_ )
else:
lowercase__ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
lowercase__ = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
lowercase__ = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
lowercase__ = embedding_output
if self.training:
lowercase__ = []
for i in range(self.config.num_hidden_layers ):
lowercase__ = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
lowercase__ = self.pooler(UpperCamelCase_ )
lowercase__ = output_layers[i](output_dropout(UpperCamelCase_ ) )
res.append(UpperCamelCase_ )
elif self.patience == 0: # Use all layers for inference
lowercase__ = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowercase__ = self.pooler(encoder_outputs[0] )
lowercase__ = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )]
else:
lowercase__ = 0
lowercase__ = None
lowercase__ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
lowercase__ = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
lowercase__ = self.pooler(UpperCamelCase_ )
lowercase__ = output_layers[i](UpperCamelCase_ )
if regression:
lowercase__ = logits.detach()
if patient_result is not None:
lowercase__ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
lowercase__ = 0
else:
lowercase__ = logits.detach().argmax(dim=1 )
if patient_result is not None:
lowercase__ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ):
patient_counter += 1
else:
lowercase__ = 0
lowercase__ = logits
if patient_counter == self.patience:
break
lowercase__ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , UpperCamelCase__ , )
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[int] , UpperCamelCase_: int ) -> int:
"""simple docstring"""
super().__init__(UpperCamelCase_ )
lowercase__ = config.num_labels
lowercase__ = BertModelWithPabee(UpperCamelCase_ )
lowercase__ = nn.Dropout(config.hidden_dropout_prob )
lowercase__ = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Any=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: List[str]=None , ) -> List[str]:
"""simple docstring"""
lowercase__ = self.bert(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
lowercase__ = (logits[-1],)
if labels is not None:
lowercase__ = None
lowercase__ = 0
for ix, logits_item in enumerate(UpperCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
lowercase__ = MSELoss()
lowercase__ = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
lowercase__ = CrossEntropyLoss()
lowercase__ = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
lowercase__ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
lowercase__ = (total_loss / total_weights,) + outputs
return outputs
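# A toy sketch of the patience rule implemented above, isolated from BERT:
# run one classifier per layer and stop as soon as `patience` consecutive
# layers agree on the argmax prediction. The tensor list and classifier list
# are stand-ins for the real per-layer hidden states and `self.classifiers`.
def pabee_early_exit(layer_states, classifiers, patience=2):
    patient_counter = 0
    patient_result = None
    for hidden, classifier in zip(layer_states, classifiers):
        logits = classifier(hidden)
        labels = logits.detach().argmax(dim=1)
        if patient_result is not None and torch.all(labels.eq(patient_result.detach().argmax(dim=1))):
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = logits
        if patient_counter == patience:
            break  # early exit: the remaining layers are skipped
    return patient_result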
| 43
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)
a_ : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
a_ : Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
SCREAMING_SNAKE_CASE = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE = hf_model.feature_extractor
SCREAMING_SNAKE_CASE = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE = name.split(_UpperCamelCase )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE = 'weight'
else:
SCREAMING_SNAKE_CASE = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE = name.split('.' )
SCREAMING_SNAKE_CASE = int(items[0] )
SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = full_name.split('adaptor.' )[-1]
SCREAMING_SNAKE_CASE = name.split('.' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE = int(items[1] )
else:
SCREAMING_SNAKE_CASE = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
SCREAMING_SNAKE_CASE = value
logger.info(f"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(
_UpperCamelCase , add_adapter=_UpperCamelCase , adapter_stride=_UpperCamelCase , adapter_kernel_size=_UpperCamelCase , use_auth_token=_UpperCamelCase , output_hidden_size=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = MBartConfig.from_pretrained(_UpperCamelCase )
# load model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
SCREAMING_SNAKE_CASE = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase , use_auth_token=_UpperCamelCase )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE = WavaVecaModel(_UpperCamelCase )
recursively_load_weights_wavaveca(model.encoder , _UpperCamelCase )
# load decoder weights
SCREAMING_SNAKE_CASE = MBartForCausalLM(_UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_UpperCamelCase )
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
SCREAMING_SNAKE_CASE = SpeechEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = MBartaaTokenizer(_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE = 'mbart50'
SCREAMING_SNAKE_CASE = 'wav2vec2'
SCREAMING_SNAKE_CASE = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE = 25_00_04
SCREAMING_SNAKE_CASE = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE = SpeechEncoderDecoderConfig.from_dict(_UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
feature_extractor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config")
a_ : Dict = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
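# Example invocation of this conversion script (the script filename and all
# paths below are placeholders):
#
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50
#
# The dumped folder can then be reloaded with the standard API:
def load_converted_model(dump_dir: str = "./wav2vec2-mbart50") -> SpeechEncoderDecoderModel:
    return SpeechEncoderDecoderModel.from_pretrained(dump_dir)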
| 439
| 0
|
def actual_power(a: int, b: int) -> int:
    """Compute a**b for a non-negative exponent b by divide and conquer."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent; negative exponents give a float."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
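# A few quick sanity checks for the functions above (hand-computed values):
def sanity_checks() -> None:
    assert actual_power(2, 10) == 1024
    assert power(2, -2) == 0.25
    assert power(-2, -3) == -0.125
    assert power(5, 0) == 1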
| 604
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 42
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self ,a_ = 16 ,a_ = 88 ,a_ = None ,a_ = None ,a_ = 1 ,a_ = 0.0 ,a_ = 32 ,a_ = None ,a_ = False ,a_ = None ,a_ = "geglu" ,a_ = True ,a_ = True ,):
"""simple docstring"""
super().__init__()
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = attention_head_dim
lowerCAmelCase__ = num_attention_heads * attention_head_dim
lowerCAmelCase__ = in_channels
lowerCAmelCase__ = torch.nn.GroupNorm(num_groups=a_ ,num_channels=a_ ,eps=1e-6 ,affine=a_ )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
# 3. Define transformers blocks
lowerCAmelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
a_ ,a_ ,a_ ,dropout=a_ ,cross_attention_dim=a_ ,activation_fn=a_ ,attention_bias=a_ ,double_self_attention=a_ ,norm_elementwise_affine=a_ ,)
for d in range(a_ )
] )
lowerCAmelCase__ = nn.Linear(a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,a_=None ,a_=None ,a_=1 ,a_=None ,a_ = True ,):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = batch_frames // num_frames
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = hidden_states[None, :].reshape(a_ ,a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,2 ,1 ,3 ,4 )
lowerCAmelCase__ = self.norm(a_ )
lowerCAmelCase__ = hidden_states.permute(0 ,3 ,4 ,2 ,1 ).reshape(batch_size * height * width ,a_ ,a_ )
lowerCAmelCase__ = self.proj_in(a_ )
# 2. Blocks
for block in self.transformer_blocks:
lowerCAmelCase__ = block(
a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,class_labels=a_ ,)
# 3. Output
lowerCAmelCase__ = self.proj_out(a_ )
lowerCAmelCase__ = (
hidden_states[None, None, :]
.reshape(a_ ,a_ ,a_ ,a_ ,a_ )
.permute(0 ,3 ,4 ,1 ,2 )
.contiguous()
)
lowerCAmelCase__ = hidden_states.reshape(a_ ,a_ ,a_ ,a_ )
lowerCAmelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a_ )
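# A hedged smoke test, assuming the class above is diffusers'
# TransformerTemporalModel under its public name: the forward pass expects
# hidden states flattened to (batch_size * num_frames, channels, height,
# width) and attends over the frame axis only, so the output keeps the shape.
def temporal_transformer_smoke_test() -> None:
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=32, num_layers=1)
    hidden_states = torch.randn(2 * 4, 32, 8, 8)  # batch_size=2, num_frames=4
    output = model(hidden_states, num_frames=4)
    assert output.sample.shape == hidden_states.shape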
| 604
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , )-> List[Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self )-> List[Any]:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )-> str:
UpperCamelCase_ = BioGptModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> List[str]:
UpperCamelCase_ = BioGptForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase )-> Any:
UpperCamelCase_ = BioGptModel(config=_lowercase )
model.to(_lowercase )
model.eval()
# create attention mask
UpperCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=_lowercase )
UpperCamelCase_ = self.seq_length // 2
UpperCamelCase_ = 0
# first forward pass
UpperCamelCase_ , UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
UpperCamelCase_ = ids_tensor((1,) , _lowercase ).item() + 1
UpperCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
UpperCamelCase_ = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_lowercase )] , dim=1 , )
# get two different outputs
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )["last_hidden_state"]
UpperCamelCase_ = model(_lowercase , past_key_values=_lowercase , attention_mask=_lowercase )["last_hidden_state"]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase )-> Tuple:
UpperCamelCase_ = BioGptModel(config=_lowercase ).to(_lowercase ).eval()
UpperCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=_lowercase )
# first forward pass
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )["last_hidden_state"]
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[
"last_hidden_state"
]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase , _lowercase=False )-> Any:
UpperCamelCase_ = BioGptForCausalLM(_lowercase )
model.to(_lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCamelCase_ = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase_ ( self , _lowercase , *_lowercase )-> Tuple:
UpperCamelCase_ = BioGptModel(_lowercase )
UpperCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase )-> int:
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = BioGptForTokenClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = self.prepare_config_and_inputs()
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( snake_case , snake_case , snake_case , unittest.TestCase ):
UpperCamelCase_ :Tuple = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase_ :List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase_ :Union[str, Any] = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ :Optional[Any] = False
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = BioGptModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase_ = type
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_lowercase )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_lowercase , gradient_checkpointing=_lowercase )
def UpperCAmelCase_ ( self )-> Tuple:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_lowercase )
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_lowercase )
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(_lowercase )
UpperCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCamelCase_ = "left"
        # Define PAD Token = EOS Token
UpperCamelCase_ = tokenizer.eos_token
UpperCamelCase_ = model.config.eos_token_id
# use different length sentences to test batching
UpperCamelCase_ = [
"Hello, my dog is a little",
"Today, I",
]
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="pt" , padding=_lowercase )
UpperCamelCase_ = inputs["input_ids"].to(_lowercase )
UpperCamelCase_ = model.generate(
input_ids=_lowercase , attention_mask=inputs["attention_mask"].to(_lowercase ) , )
UpperCamelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(_lowercase )
UpperCamelCase_ = model.generate(input_ids=_lowercase )
UpperCamelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
UpperCamelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(_lowercase )
UpperCamelCase_ = model.generate(input_ids=_lowercase , max_length=model.config.max_length - num_paddings )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
UpperCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowercase )
UpperCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowercase )
UpperCamelCase_ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase_ ( self )-> Dict:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = BioGptModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_lowercase )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = BioGptForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "multi_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(_lowercase )
UpperCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase_ = BioGptForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __magic_name__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
UpperCamelCase_ = torch.tensor([[2, 4_805, 9, 656, 21]] )
UpperCamelCase_ = model(_lowercase )[0]
UpperCamelCase_ = 42_384
UpperCamelCase_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _lowercase )
UpperCamelCase_ = torch.tensor(
[[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self )-> Union[str, Any]:
UpperCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(_lowercase )
torch.manual_seed(0 )
UpperCamelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(_lowercase )
UpperCamelCase_ = model.generate(
**_lowercase , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=_lowercase , )
UpperCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowercase )
UpperCamelCase_ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(_lowercase , _lowercase )
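# A standalone sketch of the generation path covered by the integration test:
# load the public "microsoft/biogpt" checkpoint and beam-search a completion.
# Seeding and the generation arguments mirror the test above.
def biogpt_generate(prompt: str = "COVID-19 is") -> str:
    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
    torch.manual_seed(0)
    inputs = tokenizer(prompt, return_tensors="pt")
    output_ids = model.generate(**inputs, min_length=100, max_length=1_024, num_beams=5, early_stopping=True)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)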
| 628
|
def solution(limit: int = 1_000_000) -> int:
    """
    Sum Euler's totient phi(n) for 2 <= n <= limit with a sieve: start from
    phi[i] = i - 1 and, whenever i turns out to be prime, adjust its multiples.
    """
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
    print(solution())
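# A brute-force cross-check of the sieve for small limits: phi(n) counts the
# integers 1 <= k < n that are coprime to n (computed here with math.gcd).
import math

def check_solution(limit: int = 50) -> None:
    expected = sum(
        sum(1 for k in range(1, n) if math.gcd(k, n) == 1) for n in range(2, limit + 1)
    )
    assert solution(limit) == expected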
| 628
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = {'''facebook/bart-base''': BartForConditionalGeneration}
UpperCamelCase_ = {'''facebook/bart-base''': BartTokenizer}
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_a , default=_a , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_a , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_a , default=_a , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_a , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_a , )
parser.add_argument(
"""--config_name""" , type=_a , default=_a , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_a , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_a , default=_a , help="""Where to store the final ONNX file.""" )
UpperCAmelCase_ : int = parser.parse_args()
return args
def lowerCamelCase_ ( _a : str , _a : Tuple="cpu" ):
'''simple docstring'''
UpperCAmelCase_ : Any = model_dict[model_name].from_pretrained(_a ).to(_a )
UpperCAmelCase_ : Dict = tokenizer_dict[model_name].from_pretrained(_a )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : int = 0
return huggingface_model, tokenizer
def lowerCamelCase_ ( _a : Tuple , _a : Optional[int] , _a : int , _a : Any , _a : Optional[Any] ):
'''simple docstring'''
model.eval()
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : str = torch.jit.script(BARTBeamSearchGenerator(_a ) )
with torch.no_grad():
UpperCAmelCase_ : Tuple = """My friends are cool but they eat too many carbs."""
UpperCAmelCase_ : Optional[int] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
UpperCAmelCase_ : Any = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_a , max_length=_a , early_stopping=_a , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_a , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _a , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_a , )
logger.info("""Model exported to {}""".format(_a ) )
UpperCAmelCase_ : Dict = remove_dup_initializers(os.path.abspath(_a ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_a ) )
UpperCAmelCase_ : List[str] = onnxruntime.InferenceSession(_a )
UpperCAmelCase_ : Union[str, Any] = ort_sess.run(
_a , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_a ),
"""max_length""": np.array(_a ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = parse_args()
UpperCAmelCase_ : int = 5
UpperCAmelCase_ : List[Any] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ : List[Any] = torch.device(args.device )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_tokenizer(args.model_name_or_path , _a )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_a )
if args.max_length:
UpperCAmelCase_ : Dict = args.max_length
if args.num_beams:
UpperCAmelCase_ : Optional[Any] = args.num_beams
if args.output_file_path:
UpperCAmelCase_ : str = args.output_file_path
else:
UpperCAmelCase_ : str = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_a , _a , _a , _a , _a )
if __name__ == "__main__":
main()
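# Example invocation (the script filename is a placeholder; the flags are the
# ones defined in parse_args above):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --device cpu --output_file_path BART.onnx
#
# The exported graph can be re-opened later, mirroring the validation step:
def load_exported_session(path: str = "BART.onnx") -> onnxruntime.InferenceSession:
    return onnxruntime.InferenceSession(path)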
| 322
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase_ = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase_ = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast ( BertTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast ( BertTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
  is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
  acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
  the maximum acceptable input length for the model if that argument is not provided. This will truncate
  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
  of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
  acceptable input length for the model if that argument is not provided. This will only truncate the first
  sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
  acceptable input length for the model if that argument is not provided. This will only truncate the
  second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
  greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
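# A minimal usage sketch of the call described above (the checkpoint name is one of the
# mapped checkpoints; shown here purely for illustration):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )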
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
'''simple docstring'''
    def __call__( self: str , questions , titles: Optional[str] = None , texts: Optional[str] = None , padding: Union[bool, str] = False , truncation: Union[bool, str] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'''There should be as many titles as texts but got {len(titles )} titles and {len(texts )} texts.'''
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self: int , reader_input: BatchEncoding , reader_output: DPRReaderOutput , num_spans: int = 16 , max_answer_length: int = 64 , num_spans_per_passage: int = 4 , ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(nbest_spans_predictions ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self: Any , start_logits: List[int] , end_logits: List[int] , max_answer_length: int , top_spans: int , ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
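# Sketch of the span selection above, with illustrative numbers: for start_logits = [0.1, 2.0],
# end_logits = [0.3, 1.5] and max_answer_length = 2, the candidate spans score
# (0, 0) -> 0.4, (0, 1) -> 1.6, (1, 1) -> 3.5, so (1, 1) is chosen first; any later span
# that overlaps an already-chosen interval is skipped.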
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizerFast ( CustomDPRReaderTokenizerMixin , BertTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 322
| 1
|
'''simple docstring'''
from __future__ import annotations
def solve_maze( maze ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def run_maze( maze , i , j , solutions ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
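    # Illustrative run (not part of the original doctests): 0 = open cell, 1 = wall.
    example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(example_maze )  # prints the 0/1 path matrix from (0, 0) to (2, 2)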
| 42
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma_function( num : float ) -> float:
    if num <= 0:
        raise ValueError("math domain error" )
    return quad(integrand , 0 , inf , args=(num,) )[0]
def integrand( x : float , z : float ) -> float:
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
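    # Illustrative check (using the names fixed above): Gamma(5) = 4! = 24.
    print(gamma_function(5.0 ))  # ~24.0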
| 418
| 0
|
'''simple docstring'''
def match_pattern( input_string : str , pattern : str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = 'aab'
pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 702
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='''max_length''' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
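# Illustrative: with pad_token_id = 0, input_ids [[5, 6, 0], [7, 0, 0]] keeps only the
# first two columns, since the last column is padding in every row.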
class Seq2SeqDataset ( Dataset ):
'''simple docstring'''
    def __init__( self ,tokenizer ,data_dir ,max_source_length ,max_target_length ,type_path="train" ,n_obs=None ,src_lang=None ,tgt_lang=None ,prefix="" ,) -> Any:
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self ) -> Optional[int]:
'''simple docstring'''
return len(self.src_lens )
    def __getitem__( self ,index ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) ,index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) ,index ).rstrip('''\n''' )
        assert source_line, f'''empty source line for index {index}'''
        assert tgt_line, f'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,TaTokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer ,RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer ,source_line ,self.max_source_length ,'''right''' )
        target_inputs = encode_line(target_tokenizer ,tgt_line ,self.max_target_length ,'''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ) -> Any:
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self ,batch ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids ,tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids ,src_pad_token_id ,attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
UpperCamelCase__ : int = getLogger(__name__)
def flatten_list( summary_ids : List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path : str ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def save_json( content : Tuple , path : Optional[Any] , indent : List[Any]=4 , **json_dump_kwargs : Optional[Any] ):
    '''simple docstring'''
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path : Tuple ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f : Callable , x : Iterable ):
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save( obj : str , path : str ):
    '''simple docstring'''
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s : int ):
    '''simple docstring'''
    def remove_articles(text : int ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix(text : List[Any] ):
        return " ".join(text.split() )
    def remove_punc(text : Tuple ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text : Any ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction : Optional[Any] , ground_truth : Any ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
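# Illustrative (using the names fixed above): f1_score("The cat sat.", "a cat sat")
# normalizes both sides to "cat sat", so precision = recall = F1 = 1.0.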
def exact_match_score( prediction : Dict , ground_truth : Tuple ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns : List[str] , reference_lns : List[str] ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix : int ):
    '''simple docstring'''
    return model_prefix.startswith('''rag''' )
def set_extra_model_params( extra_params : str , hparams : Any , config : Tuple ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 496
| 0
|
"""simple docstring"""
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 373
|
"""simple docstring"""
def is_ip_va_address_valid( ip_va_address : str ) -> bool:
    """simple docstring"""
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= int(octet ) <= 254 for octet in octets )
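# Illustrative checks (after the octet fix above):
#   is_ip_va_address_valid("192.168.0.1")  -> True
#   is_ip_va_address_valid("192.168.0.255") -> False (this snippet caps octets at 254)
#   is_ip_va_address_valid("1.2.3")         -> False (wrong number of octets)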
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(F'{ip} is a {valid_or_invalid} IP v4 address.')
| 373
| 1
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number : int ) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( n : int = 2_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 714
|
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory( args : Namespace ) -> List[str]:
    """simple docstring"""
    return TrainCommand(args )
class TrainCommand ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        train_parser = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data' , type=str , required=True , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
        train_parser.add_argument(
            '--column_label' , type=int , default=0 , help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text' , type=int , default=1 , help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id' , type=int , default=2 , help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data' , type=str , default='' , help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split' , type=float , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
        train_parser.add_argument('--output' , type=str , default='./' , help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task' , type=str , default='text_classification' , help='Task to train the model on.' )
        train_parser.add_argument(
            '--model' , type=str , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size' , type=int , default=32 , help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size' , type=int , default=64 , help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate' , type=float , default=3E-5 , help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon' , type=float , default=1E-08 , help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
    def __init__( self , args ):
        """simple docstring"""
        self.logger = logging.get_logger('transformers-cli/training' )
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output , exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F"""Loading dataset from {args.train_data}""" )
        self.train_dataset = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run( self ):
        """simple docstring"""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def run_torch( self ):
        """simple docstring"""
        raise NotImplementedError
    def run_tf( self ):
        """simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 104
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links( workflow_run_id :Dict , token :Optional[int]=None ) -> str:
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' , headers=headers ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        return job_links
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
def get_artifacts_links( workflow_run_id :Union[str, Any] , token :Dict=None ) -> Optional[Any]:
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' , headers=headers ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        return artifacts
    except Exception:
        print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
def download_artifact( artifact_name :str , artifact_url :int , output_dir :Union[str, Any] , token :Any ) -> str:
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f'''Bearer {token}'''}
    result = requests.get(artifact_url , headers=headers , allow_redirects=False )
    download_url = result.headers["Location"]
    response = requests.get(download_url , allow_redirects=True )
    file_path = os.path.join(output_dir , f'''{artifact_name}.zip''' )
    with open(file_path , "wb" ) as fp:
        fp.write(response.content )
def get_errors_from_single_artifact( artifact_zip_path :List[Any] , job_links :Dict=None ) -> str:
    """simple docstring"""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path ) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
                        for line in f:
                            line = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": " )]
                                    error = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                test = line[len("FAILED " ) :]
                                failed_tests.append(test )
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            f'''`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` '''
            f'''and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem." )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name , None )
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
    return result
def get_all_errors( artifact_dir :Tuple , job_links :Union[str, Any]=None ) -> Dict:
    """simple docstring"""
    errors = []
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
    return errors
def reduce_by_error( logs :Optional[int] , error_filter :Dict=None ) -> Optional[int]:
    """simple docstring"""
    counter = Counter()
    counter.update([x[1] for x in logs] )
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def get_model( test :int ) -> Optional[int]:
    """simple docstring"""
    test = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        test = test.split("/" )[2]
    else:
        test = None
    return test
def reduce_by_model( logs :Tuple , error_filter :Any=None ) -> Tuple:
    """simple docstring"""
    logs = [(x[0], x[1], get_model(x[2] )) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values() )
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
    return r
def make_github_table( reduced_by_error :List[Any] ) -> int:
    """simple docstring"""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f'''| {count} | {error[:100]} | |'''
        lines.append(line )
    return "\n".join(lines )
def make_github_table_per_model( reduced_by_model :Union[str, Any] ) -> str:
    """simple docstring"""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error , _count = list(reduced_by_model[model]["errors"].items() )[0]
        line = f'''| {model} | {count} | {error[:60]} | {_count} |'''
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sa2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(sa2)
| 693
|
from math import sqrt
def sum_of_divisors( n :int ) -> int:
    """simple docstring"""
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( n :int = 1_0000 ) -> int:
    """simple docstring"""
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
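    # With n = 10000 (the default) this is Project Euler #21 (sum of amicable numbers
    # under 10000), whose known answer is 31626.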
| 693
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ) -> Optional[Any]:
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name )
_lowercase : Optional[int] ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class ConvertCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser : ArgumentParser ) -> Dict:
        train_parser = parser.add_parser(
            'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , )
        train_parser.add_argument('--model_type' , type=str , required=True , help='Model\'s type.' )
        train_parser.add_argument(
            '--tf_checkpoint' , type=str , required=True , help='TensorFlow checkpoint path or folder.' )
        train_parser.add_argument(
            '--pytorch_dump_output' , type=str , required=True , help='Path to the PyTorch saved model output.' )
        train_parser.add_argument('--config' , type=str , default='' , help='Configuration file path or folder.' )
        train_parser.add_argument(
            '--finetuning_task_name' , type=str , default=None , help='Optional fine-tuning task name if the TF model was a finetuned model.' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self : str , model_type : str , tf_checkpoint : str , pytorch_dump_output : str , config : str , finetuning_task_name : str , *args : Tuple , ) -> Tuple:
        self._logger = logging.get_logger('transformers-cli/converting' )
        self._logger.info(f'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self : Dict ) -> Tuple:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
A : Optional[int] =self._tf_checkpoint
A : Optional[Any] =''
else:
A : str =self._tf_checkpoint
A : Dict =''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE__ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
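# Illustrative CLI invocation of the command defined above (paths are placeholders):
#   transformers-cli convert --model_type bert \
#     --tf_checkpoint ./model.ckpt --config ./bert_config.json \
#     --pytorch_dump_output ./pytorch_model.bin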
| 661
|
__version__ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 661
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
    def __init__( self ):
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append("""on_init_end""" )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_train_begin""" )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append("""on_train_end""" )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_epoch_begin""" )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append("""on_epoch_end""" )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_step_begin""" )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append("""on_step_end""" )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append("""on_evaluate""" )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append("""on_predict""" )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append("""on_save""" )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append("""on_log""" )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append("""on_prediction_step""" )
@require_torch
class TrainerCallbackTest ( unittest.TestCase ):
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb : cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("""on_epoch_begin""" )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("""on_log""" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("""on_save""" )
            expected_events.append("""on_epoch_end""" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
def a_ ( self ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=snake_case__ )
UpperCamelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# Independent log/save/eval
UpperCamelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
UpperCamelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
UpperCamelCase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
UpperCamelCase : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
UpperCamelCase : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
UpperCamelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# A bit of everything
UpperCamelCase : Tuple = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
UpperCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case__ , self.get_expected_events(snake_case__ ) )
# a warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
UpperCamelCase : int = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 499
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Union[str, Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a : Dict = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
a : List[Any] = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
a : Dict = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
a : List[Any] = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class __UpperCAmelCase( PreTrainedTokenizerFast ):
"""simple docstring"""
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = FunnelTokenizer
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = 2
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="<unk>" , snake_case__="<sep>" , snake_case__="<pad>" , snake_case__="<cls>" , snake_case__="<mask>" , snake_case__="<s>" , snake_case__="</s>" , snake_case__=True , snake_case__=True , snake_case__=None , snake_case__="##" , **snake_case__ , ):
'''simple docstring'''
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , clean_text=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , wordpieces_prefix=snake_case__ , **snake_case__ , )
lowercase__ : Union[str, Any]= json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
):
lowercase__ : Any= getattr(snake_case__ , normalizer_state.pop("type" ) )
lowercase__ : Optional[Any]= do_lower_case
lowercase__ : Optional[Any]= strip_accents
lowercase__ : Optional[int]= tokenize_chinese_chars
lowercase__ : Optional[int]= normalizer_class(**snake_case__ )
lowercase__ : Optional[int]= do_lower_case
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None ):
'''simple docstring'''
lowercase__ : str= [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : int= [self.sep_token_id]
lowercase__ : List[str]= [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
lowercase__ : List[str]= self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 218
| 0
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def lowerCAmelCase_ (lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase__ = [
'''decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def lowerCAmelCase_ (lowercase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = emb.weight.shape
lowerCAmelCase__ = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
lowerCAmelCase__ = emb.weight.data
return lin_layer
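# Note: the helper above implements weight tying: the bias-free Linear layer reuses the
# embedding matrix as its weight, so the converted model needs no separately stored
# output projection (which is why that key appears in the ignore list earlier).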
def lowerCAmelCase_ (lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = torch.load(lowercase__ , map_location='''cpu''' )
lowerCAmelCase__ = Namespace(**checkpoint['''cfg''']['''model'''] )
lowerCAmelCase__ = checkpoint['''model''']
remove_ignore_keys_(lowercase__ )
lowerCAmelCase__ = state_dict['''decoder.embed_tokens.weight'''].shape[0]
lowerCAmelCase__ = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
lowerCAmelCase__ = XGLMConfig(
vocab_size=lowercase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowerCAmelCase__ = XGLMForCausalLM(lowercase__ )
lowerCAmelCase__ = model.load_state_dict(lowercase__ , strict=lowercase__ )
print(lowercase__ )
lowerCAmelCase__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
_UpperCAmelCase : List[str] = parser.parse_args()
_UpperCAmelCase : Union[str, Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 288
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 288
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase_ = "src/diffusers"
UpperCAmelCase_ = "."
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase_ = spec.loader.load_module()
def A__ ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
"""simple docstring"""
return line.startswith(SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , SCREAMING_SNAKE_CASE_ ) is not None
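# A line "continues" the current object when it is still indented inside its body, is
# blank, or is the closing `)` of a multi-line signature (optionally with a `-> ...:`
# return annotation); the regex above is what keeps whole definitions together.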
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase = object_name.split('''.''' )
_UpperCAmelCase = 0
# First let's find the module where our object lives.
_UpperCAmelCase = parts[i]
while i < len(SCREAMING_SNAKE_CASE_ ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , F'''{module}.py''' ) ):
i += 1
if i < len(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , parts[i] )
if i >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_UpperCAmelCase = ''''''
_UpperCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(SCREAMING_SNAKE_CASE_ ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_UpperCAmelCase = line_index
while line_index < len(SCREAMING_SNAKE_CASE_ ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
return "".join(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
UpperCAmelCase_ = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
UpperCAmelCase_ = re.compile(r"<FILL\s+[^>]*>")
def A__ ( SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = code.split('''\n''' )
_UpperCAmelCase = 0
while idx < len(SCREAMING_SNAKE_CASE_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(SCREAMING_SNAKE_CASE_ ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
_UpperCAmelCase = len(get_indent(SCREAMING_SNAKE_CASE_ ) ) > 0
if has_indent:
_UpperCAmelCase = F'''class Bla:\n{code}'''
_UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase , _UpperCAmelCase = style_docstrings_in_code(SCREAMING_SNAKE_CASE_ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def A__ ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> Tuple:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups()
_UpperCAmelCase = find_code_in_diffusers(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = get_indent(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase = theoretical_indent
_UpperCAmelCase = start_index
# Loop to check the observed code; stop when the indentation diminishes or when we see an "# End copy" comment.
_UpperCAmelCase = True
while line_index < len(SCREAMING_SNAKE_CASE_ ) and should_continue:
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
break
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _should_continue(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and re.search(F'''^{indent}# End copy''' , SCREAMING_SNAKE_CASE_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
_UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE_ )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE_ ) is None]
_UpperCAmelCase = '''\n'''.join(SCREAMING_SNAKE_CASE_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
_UpperCAmelCase = [_re_replace_pattern.search(SCREAMING_SNAKE_CASE_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups()
_UpperCAmelCase = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if option.strip() == "all-casing":
_UpperCAmelCase = re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase = start_index + 1
if overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
return diffs
def A__ ( SCREAMING_SNAKE_CASE_ : bool = False ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '''**/*.py''' ) , recursive=SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = []
for filename in all_files:
_UpperCAmelCase = is_copy_consistent(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = '''\n'''.join(SCREAMING_SNAKE_CASE_ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase_ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 32
|
import operator as op
lowerCamelCase = """scaler.pt"""
lowerCamelCase = """pytorch_model"""
lowerCamelCase = """random_states"""
lowerCamelCase = """optimizer"""
lowerCamelCase = """scheduler"""
lowerCamelCase = """pytorch_model.bin"""
lowerCamelCase = """pytorch_model.bin.index.json"""
lowerCamelCase = """model.safetensors"""
lowerCamelCase = """model.safetensors.index.json"""
lowerCamelCase = """1.10.2"""
lowerCamelCase = """py38"""
lowerCamelCase = """4.17.0"""
lowerCamelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCamelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCamelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCamelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCamelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCamelCase = """2.0.1"""
lowerCamelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCamelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCamelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCamelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 191
| 0
|
from math import sqrt
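# A sketch of Project Euler problem 86 (assumed from the structure of this snippet):
# count axis-aligned cuboids, up to a maximum side length M, whose shortest surface
# path between opposite corners, sqrt((a + b) ** 2 + c ** 2), is an integer, and
# return the first M for which that count exceeds `limit`.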
def lowerCamelCase_ ( _lowercase = 1_000_000 ) -> int:
__A : int = 0
__A : int = 0
__A : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowercase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F'''{solution() = }''')
| 387
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase_ ( _lowercase ) -> Tuple:
__A : Optional[int] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def lowerCamelCase_ ( _lowercase ) -> int:
__A , __A : Dict = emb.weight.shape
__A : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase )
__A : int = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _lowercase ) -> int:
__A : Union[str, Any] = torch.load(_lowercase , map_location="cpu" )
__A : Any = mam_aaa["args"] or mam_aaa["cfg"]["model"]
__A : List[Any] = mam_aaa["model"]
remove_ignore_keys_(_lowercase )
__A : Tuple = state_dict["encoder.embed_tokens.weight"].shape[0]
__A : Any = MaMaaaConfig(
vocab_size=_lowercase , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
__A : Tuple = state_dict["decoder.embed_tokens.weight"]
__A : str = MaMaaaForConditionalGeneration(_lowercase )
model.model.load_state_dict(_lowercase , strict=_lowercase )
__A : Optional[Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase = parser.parse_args()
UpperCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 387
| 1
|
'''simple docstring'''
import string
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase_ : int = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase_ : Union[str, Any] = string.ascii_uppercase.find(__a )
UpperCAmelCase_ : Optional[int] = num - key
if num < 0:
UpperCAmelCase_ : Any = num + len(string.ascii_uppercase )
UpperCAmelCase_ : Optional[int] = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase_ : Optional[int] = translated + symbol
print(F'''Decryption using Key #{key}: {translated}''' )
def a__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = input("Encrypted message: " )
UpperCAmelCase_ : Union[str, Any] = message.upper()
decrypt(__a )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
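# Brute-force Caesar decryption: the function above simply tries all 26 shift keys and
# prints every candidate plaintext, leaving it to the reader to spot the readable one.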
| 71
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class a_ ( PretrainedConfig ):
UpperCAmelCase : Any = """visual_bert"""
def __init__( self : str , a_ : Union[str, Any]=3_0_5_2_2 , a_ : str=7_6_8 , a_ : int=5_1_2 , a_ : Optional[int]=1_2 , a_ : List[str]=1_2 , a_ : Any=3_0_7_2 , a_ : int="gelu" , a_ : Optional[Any]=0.1 , a_ : str=0.1 , a_ : List[str]=5_1_2 , a_ : Optional[Any]=2 , a_ : List[str]=0.0_2 , a_ : Optional[Any]=1E-1_2 , a_ : Tuple=False , a_ : Tuple=True , a_ : str=1 , a_ : Optional[int]=0 , a_ : List[str]=2 , **a_ : Optional[int] , ) -> Optional[int]:
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
snake_case: int =vocab_size
snake_case: Union[str, Any] =max_position_embeddings
snake_case: Optional[Any] =hidden_size
snake_case: str =visual_embedding_dim
snake_case: List[Any] =num_hidden_layers
snake_case: Tuple =num_attention_heads
snake_case: List[str] =intermediate_size
snake_case: Any =hidden_act
snake_case: List[str] =hidden_dropout_prob
snake_case: List[Any] =attention_probs_dropout_prob
snake_case: Optional[Any] =initializer_range
snake_case: Union[str, Any] =type_vocab_size
snake_case: Optional[int] =layer_norm_eps
snake_case: str =bypass_transformer
snake_case: str =special_visual_initialize
| 347
|
'''simple docstring'''
def a_ ( __UpperCAmelCase ) -> int:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
snake_case: Any =f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCAmelCase )
if number < 1:
snake_case: Tuple =f'''Input value of [number={number}] must be > 0'''
raise ValueError(__UpperCAmelCase )
snake_case: int =1
for i in range(1 , __UpperCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
| 1
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def snake_case__ ( ):
A : List[Any] = 9
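# Each edge below is given as [u, v, weight]; this is the classic 9-vertex example
# whose minimum spanning tree (listed afterwards) has total weight 37.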
A : List[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A : List[str] = kruskal(lowerCamelCase_ , lowerCamelCase_ )
A : Tuple = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase_ ) == sorted(lowerCamelCase_ )
| 542
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase : List[str] = logging.get_logger(__name__)
def snake_case__ ( lowerCamelCase_=None , lowerCamelCase_=None ):
return field(default_factory=lambda: default , metadata=lowerCamelCase_ )
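# Wrapping the default in a lambda gives every dataclass instance its own fresh list;
# dataclasses forbid mutable defaults, so `default_factory` is the idiomatic escape hatch.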
@dataclass
class __lowercase :
"""simple docstring"""
UpperCAmelCase_ : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
UpperCAmelCase_ : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
UpperCAmelCase_ : List[int] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} )
UpperCAmelCase_ : bool = field(default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
UpperCAmelCase_ : str = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
UpperCAmelCase_ : str = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
UpperCAmelCase_ : str = field(
default=F'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
UpperCAmelCase_ : str = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
UpperCAmelCase_ : str = field(
default=F'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
UpperCAmelCase_ : str = field(
default=F'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
UpperCAmelCase_ : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
UpperCAmelCase_ : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case ( self ) -> Union[str, Any]:
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , __UpperCAmelCase , )
def snake_case ( self ) -> Union[str, Any]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case ( self ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def snake_case ( self ) -> int:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 542
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCAmelCase_ ( TaskTemplate ):
'''simple docstring'''
_lowercase = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase = Features({'text': Value('string' )} )
_lowercase = Features({'labels': ClassLabel} )
_lowercase = "text"
_lowercase = "labels"
def __lowerCamelCase ( self , __UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : List[str] =self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : Tuple =features[self.label_column]
SCREAMING_SNAKE_CASE_ : str =label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 153
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__SCREAMING_SNAKE_CASE = Lock()
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : List[str] ,lowerCAmelCase_ : str ,lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 ,10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowerCAmelCase_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ : Any =rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE_ : Tuple =min(lowerCAmelCase_ ,lowerCAmelCase_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowerCAmelCase_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE_ : Any =max(lowerCAmelCase_ ,lowerCAmelCase_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =[]
SCREAMING_SNAKE_CASE_ : Any =[]
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last processes each have only one neighbor, so they are created
# outside of the loop
SCREAMING_SNAKE_CASE_ : Optional[int] =Pipe()
SCREAMING_SNAKE_CASE_ : str =Pipe()
process_array_.append(
Process(
target=lowerCAmelCase_ ,args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) ,) )
SCREAMING_SNAKE_CASE_ : Tuple =temp_rs
SCREAMING_SNAKE_CASE_ : List[Any] =temp_rr
for i in range(1 ,len(lowerCAmelCase_ ) - 1 ):
SCREAMING_SNAKE_CASE_ : int =Pipe()
SCREAMING_SNAKE_CASE_ : int =Pipe()
process_array_.append(
Process(
target=lowerCAmelCase_ ,args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) ,) )
SCREAMING_SNAKE_CASE_ : Tuple =temp_rs
SCREAMING_SNAKE_CASE_ : Any =temp_rr
process_array_.append(
Process(
target=lowerCAmelCase_ ,args=(
len(lowerCAmelCase_ ) - 1,
arr[len(lowerCAmelCase_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCAmelCase_ ) - 1],
) ,) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 ,len(lowerCAmelCase_ ) ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple =list(range(10 ,0 ,-1 ) )
print('Initial List' )
print(*lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : int =odd_even_transposition(lowerCAmelCase_ )
print('Sorted List\n' )
print(*lowerCAmelCase_ )
if __name__ == "__main__":
main()
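# Odd-even transposition sort needs n rounds for n elements, giving O(n) parallel time
# across n worker processes; the hard-coded range(0, 10) above therefore assumes the
# 10-element input that main() builds with list(range(10, 0, -1)).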
| 153
| 1
|
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowercase : Union[str, Any] ="""bert-base-cased"""
_lowercase : List[str] ="""google/pegasus-xsum"""
_lowercase : Any =[""" Sam ate lunch today.""", """Sams lunch ingredients."""]
_lowercase : Any =["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
_lowercase : int ="""patrickvonplaten/t5-tiny-random"""
_lowercase : int ="""sshleifer/bart-tiny-random"""
_lowercase : Tuple ="""sshleifer/tiny-mbart"""
_lowercase : Optional[int] ="""sshleifer/tiny-marian-en-de"""
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ : Dict = """\n""".join(_UpperCAmelCase )
Path(_UpperCAmelCase ).open('w' ).writelines(_UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.source" ) ,_UpperCAmelCase )
_dump_articles(os.path.join(_UpperCAmelCase ,F"{split}.target" ) ,_UpperCAmelCase )
return tmp_dir
class UpperCamelCase_ ( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __a ( self : List[Any] , lowerCamelCase : Dict ):
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained(A_ )
lowerCamelCase_ : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase_ : Union[str, Any] = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES )
lowerCamelCase_ : Any = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES )
lowerCamelCase_ : int = 4
lowerCamelCase_ : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCamelCase_ : Union[str, Any] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowerCamelCase_ : Dict = SeqaSeqDataset(
A_ , data_dir=A_ , type_path='train' , max_source_length=A_ , max_target_length=A_ , src_lang=A_ , tgt_lang=A_ , )
lowerCamelCase_ : Tuple = DataLoader(A_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(A_ , A_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check that the language codes are in the correct place
lowerCamelCase_ : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __a ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ : str = AutoTokenizer.from_pretrained(A_ )
lowerCamelCase_ : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase_ : int = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES )
lowerCamelCase_ : Optional[Any] = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES )
lowerCamelCase_ : List[Any] = 4
lowerCamelCase_ : Tuple = LegacySeqaSeqDataset(
A_ , data_dir=A_ , type_path='train' , max_source_length=20 , max_target_length=A_ , )
lowerCamelCase_ : int = DataLoader(A_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __a ( self : Union[str, Any] ):
lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
lowerCamelCase_ : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCamelCase_ : List[Any] = tmp_dir.joinpath('train.source' ).open().readlines()
lowerCamelCase_ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(A_ , A_ , 1_28 , A_ )
lowerCamelCase_ : List[str] = {x.name for x in tmp_dir.iterdir()}
lowerCamelCase_ : Optional[int] = {x.name for x in save_dir.iterdir()}
lowerCamelCase_ : Optional[Any] = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(A_ ) < len(A_ )
assert len(A_ ) == 1
assert len(packed_examples[0] ) == sum(len(A_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __a ( self : str ):
if not FAIRSEQ_AVAILABLE:
return
lowerCamelCase_ : Any = self._get_dataset(max_len=64 )
lowerCamelCase_ : Optional[Any] = 64
lowerCamelCase_ : int = ds.make_dynamic_sampler(A_ , required_batch_size_multiple=A_ )
lowerCamelCase_ : int = [len(A_ ) for x in batch_sampler]
assert len(set(A_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(A_ ) == len(A_ ) # no dropped or added examples
lowerCamelCase_ : Optional[Any] = DataLoader(A_ , batch_sampler=A_ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase_ : str = []
lowerCamelCase_ : Tuple = []
for batch in data_loader:
lowerCamelCase_ : Optional[int] = batch["""input_ids"""].shape
lowerCamelCase_ : Optional[int] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCamelCase_ : Optional[int] = np.product(batch['input_ids'].shape )
num_src_per_batch.append(A_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(A_ )
assert num_src_per_batch[0] == max(A_ )
if failures:
raise AssertionError(F"too many tokens in {len(A_ )} batches" )
def __a ( self : Tuple ):
lowerCamelCase_ : int = self._get_dataset(max_len=5_12 )
lowerCamelCase_ : Optional[Any] = 2
lowerCamelCase_ : Union[str, Any] = ds.make_sortish_sampler(A_ , shuffle=A_ )
lowerCamelCase_ : List[Any] = DataLoader(A_ , batch_size=A_ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase_ : Dict = DataLoader(A_ , batch_size=A_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=A_ )
lowerCamelCase_ : List[str] = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any]="input_ids" ):
return [batch[k].eq(A_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(A_ , k='labels' ) ) < sum(count_pad_tokens(A_ , k='labels' ) )
assert sum(count_pad_tokens(A_ ) ) < sum(count_pad_tokens(A_ ) )
assert len(A_ ) == len(A_ )
def __a ( self : Optional[int] , lowerCamelCase : Union[str, Any]=10_00 , lowerCamelCase : Dict=1_28 ):
if os.getenv('USE_REAL_DATA' , A_ ):
lowerCamelCase_ : Dict = """examples/seq2seq/wmt_en_ro"""
lowerCamelCase_ : Optional[int] = max_len * 2 * 64
if not Path(A_ ).joinpath('train.len' ).exists():
save_len_file(A_ , A_ )
else:
lowerCamelCase_ : Union[str, Any] = """examples/seq2seq/test_data/wmt_en_ro"""
lowerCamelCase_ : Any = max_len * 4
save_len_file(A_ , A_ )
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(A_ )
lowerCamelCase_ : Tuple = SeqaSeqDataset(
A_ , data_dir=A_ , type_path='train' , max_source_length=A_ , max_target_length=A_ , n_obs=A_ , )
return ds, max_tokens, tokenizer
def __a ( self : Optional[Any] ):
lowerCamelCase_ : List[Any] = self._get_dataset()
lowerCamelCase_ : List[str] = set(DistributedSortishSampler(A_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=A_ ) )
lowerCamelCase_ : Dict = set(DistributedSortishSampler(A_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=A_ ) )
assert idsa.intersection(A_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __a ( self : int , lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(A_ , use_fast=A_ )
if tok_name == MBART_TINY:
lowerCamelCase_ : Union[str, Any] = SeqaSeqDataset(
A_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
lowerCamelCase_ : Optional[int] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCamelCase_ : Tuple = SeqaSeqDataset(
A_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
lowerCamelCase_ : Dict = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(A_ ) == 1 if tok_name == BART_TINY else len(A_ ) == 0
| 364
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase : int = logging.get_logger(__name__)
class a__ ( BaseImageProcessor ):
_A = ["pixel_values"]
def __init__( self : Optional[Any] , A_ : bool = True , A_ : Union[int, float] = 1 / 2_55 , A_ : bool = True , A_ : int = 8 , **A_ : Dict , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_: str = do_rescale
lowerCamelCase_: List[str] = rescale_factor
lowerCamelCase_: Dict = do_pad
lowerCamelCase_: List[Any] = pad_size
def lowerCAmelCase ( self : Tuple , A_ : np.ndarray , A_ : float , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Dict ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def lowerCAmelCase ( self : Optional[int] , A_ : np.ndarray , A_ : int , A_ : Optional[Union[str, ChannelDimension]] = None ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_: Optional[int] = get_image_size(A_ )
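# (old // size + 1) * size - old pads each side up to the *next* multiple of `size`;
# a side that is already an exact multiple still gains a full extra block of padding.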
lowerCamelCase_: Tuple = (old_height // size + 1) * size - old_height
lowerCamelCase_: Optional[Any] = (old_width // size + 1) * size - old_width
return pad(A_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=A_ )
def lowerCAmelCase ( self : str , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[int] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A_ : str , ) -> Dict:
"""simple docstring"""
lowerCamelCase_: str = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_: Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_: Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowerCamelCase_: List[Any] = pad_size if pad_size is not None else self.pad_size
lowerCamelCase_: Optional[Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase_: List[str] = [to_numpy_array(A_ ) for image in images]
if do_rescale:
lowerCamelCase_: Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_pad:
lowerCamelCase_: List[str] = [self.pad(A_ , size=A_ ) for image in images]
lowerCamelCase_: Any = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_: Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 423
| 0
|
def __lowercase ( UpperCAmelCase__ ):
"""simple docstring"""
__lowerCAmelCase = len(UpperCAmelCase__ )
__lowerCAmelCase = len(matrix[0] )
__lowerCAmelCase = min(UpperCAmelCase__ , UpperCAmelCase__ )
for row in range(UpperCAmelCase__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCAmelCase__ ):
__lowerCAmelCase = matrix[col][row] / matrix[row][row]
for i in range(UpperCAmelCase__ , UpperCAmelCase__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__lowerCAmelCase = True
for i in range(row + 1 , UpperCAmelCase__ ):
if matrix[i][row] != 0:
__lowerCAmelCase, __lowerCAmelCase = matrix[i], matrix[row]
__lowerCAmelCase = False
break
if reduce:
rank -= 1
for i in range(UpperCAmelCase__ ):
__lowerCAmelCase = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
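# Example (describing the intended Gaussian elimination): [[1, 2], [2, 4]] has rank 1,
# since the second row is a multiple of the first and is zeroed out by the steps above.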
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
lowerCamelCase = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
lowerCamelCase = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
lowerCamelCase = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 102
| 1
|
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCamelCase : Union[str, Any] = '''src/transformers'''
lowerCamelCase : int = '''docs/source/en'''
lowerCamelCase : Tuple = '''.'''
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1

    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
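# Illustrative only (not part of the original script): the regex above splits at
# lower->upper and acronym->word boundaries, e.g.
#   camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"]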
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    # Dictionary mapping model names to configs.
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 367
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq, size):
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty):
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key):
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext, key):
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext, key):
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
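# Illustrative round trip (the key and message are the classic example from the
# Wikipedia article linked above, not part of the original file):
#   encode("Hide the gold in the tree stump", "playfair example")
#   -> "BMODZBXDNABEKUDMUIXMMOUVIF"
#   decode("BMODZBXDNABEKUDMUIXMMOUVIF", "playfair example")
#   -> "HIDETHEGOLDINTHETREXESTUMP"   (the X separates the doubled E of "tree")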
| 367
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_FOLDER = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FOLDER)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 403
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
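# Illustrative behavior (hypothetical inputs, not part of the original module):
#   download_prompt(None, "my-agent")            # fetches run_prompt_template.txt
#                                                # from huggingface-tools/default-prompts
#   download_prompt("Translate <<task>>", "x")   # contains whitespace -> returned as-is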
| 403
| 1
|
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
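# Illustrative input-file format (hypothetical data; inferred from how each line
# is parsed above as "<node_a> <node_b> <distance>", and from the first character
# of the file being read as the start node in generate_first_solution):
#   a b 20
#   a c 18
#   b c 10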
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 382
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 382
| 1
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 703
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 239
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 112
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr
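# Illustrative trace (not part of the original module): odd_even_transposition([3, 1, 2])
# swaps (3, 1) on the even pass and (3, 2) on the odd pass, returning [1, 2, 3].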
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 187
| 0
|
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])

    return split_words
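# Illustrative usage (not part of the original module):
#   split("apple#banana#cherry#orange", separator="#")
#   -> ['apple', 'banana', 'cherry', 'orange']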
if __name__ == "__main__":
from doctest import testmod
testmod()
| 703
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
A__: List[str] = False
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 506
| 0
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 280
|
def gray_code(bit_count: int) -> list:
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
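# Illustrative trace of the reflect-and-prefix construction above (not part of the
# original module): for bit_count=2, smaller_sequence is ["0", "1"], so the result
# is ["00", "01", "11", "10"] and gray_code(2) == [0, 1, 3, 2].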
if __name__ == "__main__":
import doctest
doctest.testmod()
| 280
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
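# Illustrative usage under the transformers agents runtime (the image path and
# label are hypothetical, not part of the original module):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")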
| 714
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
"""simple docstring"""
def __init__( self : Tuple , A_ : Optional[Any] , A_ : Dict=1_3 , A_ : str=7 , A_ : Union[str, Any]=True , A_ : int=True , A_ : Any=False , A_ : str=True , A_ : int=9_9 , A_ : int=3_2 , A_ : Optional[int]=5 , A_ : List[str]=4 , A_ : int=6_4 , A_ : Optional[int]="gelu" , A_ : List[Any]=0.1 , A_ : int=0.1 , A_ : List[str]=5_1_2 , A_ : Optional[Any]=1_6 , A_ : int=2 , A_ : Optional[int]=0.02 , A_ : Any=3 , A_ : Optional[Any]=4 , A_ : Union[str, Any]=None , A_ : Union[str, Any]=2 , A_ : Tuple=2 , A_ : Optional[int]=2 , A_ : List[Any]=2 , A_ : List[str]=4 , A_ : Union[str, Any]=1 , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : str = seq_length
_lowerCAmelCase : int = is_training
_lowerCAmelCase : Union[str, Any] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = max_position_embeddings
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Tuple = num_labels
_lowerCAmelCase : Dict = num_choices
_lowerCAmelCase : Optional[Any] = scope
_lowerCAmelCase : Union[str, Any] = q_groups
_lowerCAmelCase : Tuple = k_groups
_lowerCAmelCase : str = v_groups
_lowerCAmelCase : Tuple = post_attention_groups
_lowerCAmelCase : Tuple = intermediate_groups
_lowerCAmelCase : List[Any] = output_groups
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : int = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Any ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __magic_name__ ( self : List[str] , A_ : Dict , A_ : Union[str, Any] , A_ : List[str] , A_ : Optional[int] , A_ : str , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertModel(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , A_ )
_lowerCAmelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Optional[int] , A_ : int , A_ : Dict , A_ : Any , A_ : List[Any] , A_ : List[Any] , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : int = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , A_ : Union[str, Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] , A_ : List[str] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Tuple , A_ : Optional[int] , A_ : Dict , A_ : str , A_ : Tuple , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = SqueezeBertForSequenceClassification(A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[Any] = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Optional[Any] , A_ : List[Any] , A_ : List[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : Any = SqueezeBertForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : Any = model(A_ , attention_mask=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Tuple , A_ : Tuple , A_ : Tuple , A_ : Union[str, Any] , A_ : int , A_ : List[Any] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_choices
_lowerCAmelCase : Dict = SqueezeBertForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
_lowerCAmelCase : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_lowercase : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase : Optional[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : List[str] = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = SqueezeBertModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=A_ , dim=3_7 )
def __magic_name__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A_ )
@slow
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = SqueezeBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
_lowerCAmelCase : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_lowerCAmelCase : List[str] = model(A_ )[0]
_lowerCAmelCase : Any = torch.Size((1, 3) )
self.assertEqual(output.shape , A_ )
_lowerCAmelCase : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-4 ) )
| 503
| 0
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")

    if num > 171.5:
        raise OverflowError("math range error")

    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")

    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
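# Worked example of the half-integer recurrence above (illustrative, not part of
# the original script): gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234.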
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(f'gamma({num}) = {gamma(num)}')
print("""\nEnter 0 to exit...""")
| 104
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 123
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class __lowerCAmelCase ( __a ):
snake_case : Tuple = VOCAB_FILES_NAMES
snake_case : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case : List[str] = MobileBertTokenizer
def __init__(self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="[UNK]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="[PAD]" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
_UpperCAmelCase : List[Any] = getattr(lowerCAmelCase__ , normalizer_state.pop("""type""" ) )
_UpperCAmelCase : Dict = do_lower_case
_UpperCAmelCase : Optional[int] = strip_accents
_UpperCAmelCase : Optional[Any] = tokenize_chinese_chars
_UpperCAmelCase : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
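# Usage sketch for the fast tokenizer above (it corresponds to transformers'
# MobileBertTokenizerFast; assumes hub access to the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP):
#
# tok = MobileBertTokenizerFast.from_pretrained("mobilebert-uncased")
# enc = tok("hello world", "second segment")
# print(enc["input_ids"], enc["token_type_ids"])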
| 707
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
snake_case : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCAmelCase :
snake_case : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
snake_case : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
snake_case : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
snake_case : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty."
            " Use --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
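# Illustrative launch command for the script above (hypothetical paths; the
# task name must be a key of `processors` from utils_multiple_choice):
#
# python run_multiple_choice.py \
#   --task_name swag \
#   --model_name_or_path bert-base-uncased \
#   --data_dir ./data/swag \
#   --output_dir ./out \
#   --max_seq_length 128 \
#   --do_train --do_eval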
| 156
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = "Hello, World!"
A_ = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    """simple docstring"""
    data_dir = Path('''data_bin''' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ), checkpoint_file=Path(xmod_checkpoint_path ).name, _name='''xmod_base''', arch='''xmod_base''', task='''multilingual_masked_lm''', data_name_or_path=str(data_dir ), bpe='''sentencepiece''', sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / '''sentencepiece.bpe.model''' ), src_dict=str(data_dir / '''dict.txt''' ), )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2 ), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''', config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase = xmod_sent_encoder.embed_tokens.weight
lowercase = xmod_sent_encoder.embed_positions.weight
lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowercase = xmod_sent_encoder.layernorm_embedding.weight
lowercase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase = model.roberta.encoder.layer[i]
lowercase = xmod_sent_encoder.layers[i]
# self attention
lowercase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowercase = xmod_layer.self_attn.q_proj.weight
lowercase = xmod_layer.self_attn.q_proj.bias
lowercase = xmod_layer.self_attn.k_proj.weight
lowercase = xmod_layer.self_attn.k_proj.bias
lowercase = xmod_layer.self_attn.v_proj.weight
lowercase = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowercase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowercase = xmod_layer.self_attn.out_proj.weight
lowercase = xmod_layer.self_attn.out_proj.bias
lowercase = xmod_layer.self_attn_layer_norm.weight
lowercase = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowercase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowercase = xmod_layer.fca.weight
lowercase = xmod_layer.fca.bias
# output
lowercase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowercase = xmod_layer.fca.weight
lowercase = xmod_layer.fca.bias
lowercase = xmod_layer.final_layer_norm.weight
lowercase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowercase = xmod_layer.adapter_layer_norm.weight
lowercase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowercase = bert_output.adapter_modules[lang_code]
lowercase = xmod_layer.adapter_modules[lang_code]
lowercase = from_adapter.fca.weight
lowercase = from_adapter.fca.bias
lowercase = from_adapter.fca.weight
lowercase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowercase = xmod_sent_encoder.layer_norm.weight
lowercase = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowercase = xmod.model.classification_heads['''mnli'''].dense.weight
lowercase = xmod.model.classification_heads['''mnli'''].dense.bias
lowercase = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowercase = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowercase = xmod.model.encoder.lm_head.dense.weight
lowercase = xmod.model.encoder.lm_head.dense.bias
lowercase = xmod.model.encoder.lm_head.layer_norm.weight
lowercase = xmod.model.encoder.lm_head.layer_norm.bias
lowercase = xmod.model.encoder.lm_head.weight
lowercase = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'max_absolute_diff = {max_absolute_diff}' )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
A_ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
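# Example invocation (script name and checkpoint paths are placeholders):
#
# python convert_xmod_checkpoint.py \
#   --xmod_checkpoint_path ./xmod.base/model.pt \
#   --pytorch_dump_folder_path ./xmod-base \
#   --classification_head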
| 604
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _A , unittest.TestCase ):
lowercase = None
lowercase = BloomTokenizerFast
lowercase = BloomTokenizerFast
lowercase = True
lowercase = False
lowercase = 'tokenizer_file'
lowercase = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}
def __a ( self : str ) -> List[str]:
'''simple docstring'''
super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Union[str, Any] , **__lowerCamelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __a ( self : List[Any] ) -> str:
'''simple docstring'''
lowercase = self.get_rust_tokenizer()
lowercase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
lowercase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
lowercase = tokenizer.batch_encode_plus(__lowerCamelCase )['''input_ids''']
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
lowercase = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __a ( self : List[Any] , __lowerCamelCase : List[Any]=6 ) -> Dict:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase = '''This is a simple input'''
lowercase = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase = ('''This is a simple input''', '''This is a pair''')
lowercase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.batch_encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.encode(__lowerCamelCase , max_length=__lowerCamelCase )
tokenizer_r.batch_encode_plus(__lowerCamelCase , max_length=__lowerCamelCase )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
lowercase = None # Hotfixing padding = None
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCamelCase , tokenizer_r.encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCamelCase , tokenizer_r.batch_encode_plus , __lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' , )
def __a ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_rust_tokenizer()
lowercase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=__lowerCamelCase )
lowercase = next(iter(__lowerCamelCase ) )['''premise'''] # pick up one data
lowercase = list(sample_data.values() )
lowercase = list(map(tokenizer.encode , __lowerCamelCase ) )
lowercase = [tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase ) for x in output_tokens]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __a ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 604
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :Optional[Any] = logging.get_logger(__name__)
a :Optional[Any] = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = """open-llama"""
def __init__( self , _a=100_000 , _a=4_096 , _a=11_008 , _a=32 , _a=32 , _a="silu" , _a=2_048 , _a=0.02 , _a=1E-6 , _a=True , _a=0 , _a=1 , _a=2 , _a=False , _a=True , _a=0.1 , _a=0.1 , _a=True , _a=True , _a=None , **_a , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = rms_norm_eps
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop(
"""use_memorry_efficient_attention""" , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = use_stable_embedding
SCREAMING_SNAKE_CASE__ : Union[str, Any] = shared_input_output_embedding
SCREAMING_SNAKE_CASE__ : Dict = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a , )
def _a ( self ) -> str:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _a ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
SCREAMING_SNAKE_CASE__ : List[Any] = self.rope_scaling.get("""type""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rope_scaling.get("""factor""" , _a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_a , _a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
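# For reference, values exercised by the validation method above:
#   rope_scaling={"type": "linear", "factor": 2.0}    # accepted
#   rope_scaling={"type": "dynamic", "factor": 1.5}   # accepted
#   rope_scaling={"factor": 2.0}                      # rejected (missing "type")
#   rope_scaling={"type": "linear", "factor": 1.0}    # rejected (factor must be > 1)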
| 12
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a :List[Any] = None
a :Optional[int] = logging.get_logger(__name__)
a :Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a :Optional[int] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
a :Dict = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
a :int = "▁"
# Segments (not really needed)
a :Dict = 0
a :Optional[int] = 1
a :Tuple = 2
a :List[str] = 3
a :Optional[Any] = 4
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = """left"""
_SCREAMING_SNAKE_CASE :Optional[Any] = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE__ : List[str] = remove_space
SCREAMING_SNAKE_CASE__ : int = keep_accents
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
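# Usage sketch (the class above corresponds to transformers' XLNetTokenizerFast;
# hub access to "xlnet-base-cased" is assumed):
#
# tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# print(tok("Hello world")["input_ids"])  # XLNet appends <sep> and <cls> at the end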
| 12
| 1
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input , model , tokenizer , topk=5 ):
    """simple docstring"""
    assert masked_input.count("""<mask>""" ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = """ """.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(values ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
        predicted_token = predicted_token_bpe.replace("""\u2581""" , """ """ )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(""" {0}""".format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 577
|
'''simple docstring'''
import numpy as np
import datasets
__snake_case: List[str] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
__snake_case: Optional[Any] = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
__snake_case: List[Any] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
    def _compute(self , X , reference_distribution ):
        '''simple docstring'''
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu , inv_covmat )
        mahal_dist = np.dot(left_term , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 577
| 1
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Any = logging.get_logger(__name__)
UpperCamelCase_ : str = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCamelCase__ ( __lowerCamelCase ):
"""simple docstring"""
UpperCamelCase__ = '''efficientformer'''
def __init__( self : int ,a__ : List[int] = [3, 2, 6, 4] ,a__ : List[int] = [48, 96, 2_24, 4_48] ,a__ : List[bool] = [True, True, True, True] ,a__ : int = 4_48 ,a__ : int = 32 ,a__ : int = 4 ,a__ : int = 7 ,a__ : int = 5 ,a__ : int = 8 ,a__ : int = 4 ,a__ : float = 0.0 ,a__ : int = 16 ,a__ : int = 3 ,a__ : int = 3 ,a__ : int = 3 ,a__ : int = 2 ,a__ : int = 1 ,a__ : float = 0.0 ,a__ : int = 1 ,a__ : bool = True ,a__ : bool = True ,a__ : float = 1e-5 ,a__ : str = "gelu" ,a__ : float = 0.02 ,a__ : float = 1e-12 ,a__ : int = 2_24 ,a__ : float = 1e-05 ,**a__ : Any ,):
super().__init__(**a__ )
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = hidden_sizes
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = initializer_range
a__ = layer_norm_eps
a__ = patch_size
a__ = num_channels
a__ = depths
a__ = mlp_expansion_ratio
a__ = downsamples
a__ = dim
a__ = key_dim
a__ = attention_ratio
a__ = resolution
a__ = pool_size
a__ = downsample_patch_size
a__ = downsample_stride
a__ = downsample_pad
a__ = drop_path_rate
        a__ = num_meta3d_blocks
a__ = distillation
a__ = use_layer_scale
a__ = layer_scale_init_value
a__ = image_size
a__ = batch_norm_eps
| 713
|
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key ):
    """simple docstring"""
    if "model" in orig_key:
        orig_key = orig_key.replace("model." , "" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1" , "attention.output.LayerNorm" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2" , "output.LayerNorm" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm" , "LayerNorm" )
    if "transformer" in orig_key:
        layer_num = orig_key.split("." )[0].split("_" )[-1]
        orig_key = orig_key.replace(F'transformer_{layer_num}' , F'encoder.layer.{layer_num}' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn" , "attention.self" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha" , "attention" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q" , "self.query" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k" , "self.key" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v" , "self.value" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1" , "intermediate.dense" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2" , "output.dense" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff" , "output.dense" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm" , "cls.predictions.transform" )
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings , orig_state_dict ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path , yoso_config_file , pytorch_dump_path ):
    """simple docstring"""
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
if __name__ == "__main__":
UpperCamelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCamelCase_ : Any = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
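# Example invocation (script name and paths are placeholders):
#
# python convert_yoso_checkpoint.py \
#   --pytorch_model_path ./yoso.ckpt \
#   --config_file ./yoso_config.json \
#   --pytorch_dump_path ./yoso-hf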
| 394
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
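# Note on the pattern above: `_LazyModule` defers the real imports, so e.g.
# `from transformers.models.nllb import NllbTokenizer` only loads
# tokenization_nllb (and its sentencepiece dependency) at first access.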
| 245
|
def neville_interpolate(x_points , y_points , xa ) -> list:
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
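    # Worked example for neville_interpolate above: the unique interpolating
    # polynomial through (1,1), (2,4), (3,9), (4,16) is x**2, so:
    print(neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5 )[0] )  # 25.0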
| 60
| 0
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray ) -> bool:
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 325
|
class Things:
    def __init__(self , name , value , weight ):
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__(self ):
        """simple docstring"""
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
    def get_value(self ):
        """simple docstring"""
        return self.value
    def get_name(self ):
        """simple docstring"""
        return self.name
    def get_weight(self ):
        """simple docstring"""
        return self.weight
    def value_weight(self ):
        """simple docstring"""
        return self.value / self.weight
def build_menu(name , value , weight ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    pass
if __name__ == "__main__":
    import doctest
    doctest.testmod()
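    # Worked example for the greedy routine above: with a 60-unit budget and
    # value as the sort key, Pizza (value 100, weight 10) and Burger (80, 40)
    # fit, for a total value of 180.0.
    food = ["Burger", "Pizza", "Coca Cola"]
    value = [80, 100, 60]
    weight = [40, 10, 70]
    foods = build_menu(food , value , weight )
    print(greedy(foods , 60 , Things.get_value ) )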
| 325
| 1
|
'''simple docstring'''
import math
class SelfOrganizingMap:
    '''simple docstring'''
    def get_winner(self , weights , sample ) -> int:
        '''simple docstring'''
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
    def update(self , weights , sample , j , alpha ) -> list[list[int | float]]:
        '''simple docstring'''
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'''Clusters that the test sample belongs to : {winner}''' )
    print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 42
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs ):
    """simple docstring"""
    with open(__file__ , """r""" ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
a : Tuple = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
a : Union[str, Any] = torch.device('cuda', local_rank)
a : Any = socket.gethostname()
a : Optional[Any] = F'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a : Optional[Any] = dist.get_rank()
a : List[Any] = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 556
| 0
|
'''simple docstring'''
import torch
def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 394
|
'''simple docstring'''
import os
SYMBOLS = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1_000}
def parse_roman_numerals(numerals ):
    """simple docstring"""
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num ):
    """simple docstring"""
    numerals = ""
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename = "/p089_roman.txt" ):
    """simple docstring"""
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
    return savings
if __name__ == "__main__":
print(F"{solution() = }")
| 394
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
"""simple docstring"""
lowercase : int = LongformerTokenizer
lowercase : List[str] = True
lowercase : int = LongformerTokenizerFast
lowercase : Optional[Any] = True
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__UpperCamelCase : List[str] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__UpperCamelCase : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__UpperCamelCase : Optional[int] = {'unk_token': '<unk>'}
__UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def __lowerCamelCase ( self , **__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __lowerCamelCase ( self , **__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Dict = 'lower newer'
__UpperCamelCase : Union[str, Any] = 'lower newer'
return input_text, output_text
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase : str = 'lower newer'
__UpperCamelCase : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__UpperCamelCase : Optional[int] = tokenizer.tokenize(_lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Dict = tokens + [tokenizer.unk_token]
__UpperCamelCase : Optional[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=_lowerCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=_lowerCAmelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : Any = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
__UpperCamelCase : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase )
__UpperCamelCase : str = tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase )
__UpperCamelCase : Any = tokenizer.encode(
"sequence builders" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__UpperCamelCase : List[str] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
__UpperCamelCase : Dict = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Tuple = 'Encode this sequence.'
__UpperCamelCase : List[Any] = tokenizer.byte_encoder[' '.encode("utf-8" )[0]]
# Testing encoder arguments
__UpperCamelCase : int = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__UpperCamelCase : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase )
__UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
__UpperCamelCase : Optional[Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
__UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
# Testing spaces after special tokens
__UpperCamelCase : int = '<mask>'
tokenizer.add_special_tokens(
{"mask_token": AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )} ) # mask token has a left space
__UpperCamelCase : int = tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
__UpperCamelCase : List[Any] = 'Encode <mask> sequence'
__UpperCamelCase : List[Any] = 'Encode <mask>sequence'
__UpperCamelCase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase )
__UpperCamelCase : int = encoded.index(_lowerCAmelCase )
__UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__UpperCamelCase : Tuple = tokenizer.encode(_lowerCAmelCase )
__UpperCamelCase : Optional[Any] = encoded.index(_lowerCAmelCase )
__UpperCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__UpperCamelCase : Dict = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__UpperCamelCase : List[str] = 'A, <mask> AllenNLP sentence.'
__UpperCamelCase : Any = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
__UpperCamelCase : Any = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCamelCase : str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCamelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
__UpperCamelCase : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCamelCase : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , _lowerCAmelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , _lowerCAmelCase )
self.assertEqual(post_processor_state["trim_offsets"] , _lowerCAmelCase )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCamelCase : int = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase : str = f'''{text_of_1_token} {text_of_1_token}'''
__UpperCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
__UpperCamelCase : Optional[int] = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
__UpperCamelCase : str = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ) + 1, len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
__UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
__UpperCamelCase : Dict = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
__UpperCamelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , use_fast=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , trim_offsets=_lowerCAmelCase )
__UpperCamelCase : int = tokenizer_r(_lowerCAmelCase , return_offsets_mapping=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCAmelCase ), len(_lowerCAmelCase ) + 1 + len(_lowerCAmelCase )) , )
__UpperCamelCase : Optional[int] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
| 327
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __a ( self , **_lowerCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __a ( self , _lowerCAmelCase ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
def __a ( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def __a ( self ):
pass
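# A quick sanity sketch (not part of the test suite) of why the assertion above
# expects ids [7, 4, 5, 10, 8, 9]: LayoutLM uses a WordPiece vocab, and a token's
# id is simply its position in the vocab file written out in setUp().
_example_vocab = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
_example_ids = [_example_vocab.index(t ) for t in ['un', '##want', '##ed', ',', 'runn', '##ing']]
assert _example_ids == [7, 4, 5, 10, 8, 9]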
| 66
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class LevitModelTester :
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
    all_model_classes = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def __a ( self : str ) -> Dict:
"""simple docstring"""
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def __a ( self : int ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def __a ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
def __a ( self : str ) -> List[str]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
pass
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase : int = model_class(_A )
model.to(_A )
model.train()
lowercase : Any = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : Optional[Any] = model(**_A ).loss
loss.backward()
def __a ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase : Tuple = False
lowercase : str = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase : Optional[int] = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
lowercase : List[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : str = model(**_A ).loss
loss.backward()
def __a ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : str = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
lowercase : Dict = problem_type['''title''']
lowercase : Optional[Any] = problem_type['''num_labels''']
lowercase : List[str] = model_class(_A )
model.to(_A )
model.train()
lowercase : Union[str, Any] = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
lowercase : Tuple = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase : Any = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
lowercase : Optional[int] = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Dict = LevitModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
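# A worked example (illustrative only) of the convolution output-size formula used in
# the Levit tests above: out = floor((in + 2 * padding - kernel_size) / stride) + 1.
# With the tester defaults (image_size=64, kernel_size=3, stride=2, padding=1), four
# stacked convolutions shrink the spatial side 64 -> 32 -> 16 -> 8 -> 4.
_side = 64
for _ in range(4 ):
    _side = floor(((_side + 2 * 1 - 3) / 2) + 1 )
assert _side == 4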
| 709
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester :
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """simple docstring"""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_UpperCamelCase : int = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_UpperCamelCase : Any = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : int = False
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
def __a ( self : Dict ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Dict ) -> int:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class _A ( unittest.TestCase ):
    src_text = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
    model_name = '''facebook/blenderbot_small-90M'''
@cached_property
    def tokenizer( self ):
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
    def model( self ):
"""simple docstring"""
lowercase : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 596
| 0
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path( suffix="" ) -> str:
    '''simple docstring'''
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor , _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1E-4 ) )
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
        tensor = torch.rand(12 , dtype=torch.float64 ) - 0.5
        path = get_new_path(suffix='''.wav''' )
        sf.write(path , tensor , 1_60_00 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1E-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
        tensor = torch.randint(0 , 2_56 , (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type._tensor , atol=1E-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
        path = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
        image = Image.open(path )
        agent_type = AgentImage(path )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
        path = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
        image = Image.open(path )
        agent_type = AgentImage(image )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
        text = '''Hey!'''
        agent_type = AgentText(text )
        self.assertEqual(text , agent_type.to_string() )
        self.assertEqual(text , agent_type.to_raw() )
        self.assertEqual(text , agent_type )
| 322
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class UpperCAmelCase__ ( A_ ):
'''simple docstring'''
    model_type = '''mra'''
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
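# A minimal usage sketch (assuming this config is exposed by transformers as `MraConfig`):
# instantiating it with no arguments yields the defaults above, and keyword arguments
# override them, as with any `PretrainedConfig` subclass, e.g.:
#   config = MraConfig(hidden_size=512, num_hidden_layers=6)
#   config.approx_mode  # -> 'full'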
| 322
| 1
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter : int ) -> typing.Counter[int]:
    triplets : typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( max_perimeter : int = 1_0_0_0 ) -> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
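# A quick worked check (illustrative): with max_perimeter=12 the only integer right
# triangle is (3, 4, 5), whose perimeter is exactly 12, so the counter holds {12: 1}.
assert pythagorean_triple(12) == Counter({12: 1})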
| 141
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A : int = logging.getLogger(__name__)
class _UpperCAmelCase ( _A ):
    mode = "sequence-classification"
    def __init__( self , hparams ):
        if type(hparams ) == dict:
            hparams = Namespace(**hparams )
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams , num_labels , self.mode )
    def forward( self , **inputs ):
        return self.model(**inputs )
    def training_step( self , batch , batch_idx ):
        inputs = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['''token_type_ids'''] = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
        outputs = self(**inputs )
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]['''scheduler''']
        tensorboard_logs = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
def A ( self : Optional[int] ) -> Union[str, Any]:
lowercase_ : str = self.hparams
lowercase_ : Any = processors[args.task]()
lowercase_ : Union[str, Any] = processor.get_labels()
for mode in ["train", "dev"]:
lowercase_ : Union[str, Any] = self._feature_file(A )
if os.path.exists(A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase_ : Union[str, Any] = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
lowercase_ : Tuple = convert_examples_to_features(
A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , A )
torch.save(A , A )
def A ( self : Optional[Any] , A : str , A : int , A : bool = False ) -> DataLoader:
lowercase_ : Union[str, Any] = '''dev''' if mode == '''test''' else mode
lowercase_ : List[Any] = self._feature_file(A )
logger.info('''Loading features from cached file %s''' , A )
lowercase_ : Optional[int] = torch.load(A )
lowercase_ : str = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase_ : Union[str, Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowercase_ : List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowercase_ : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowercase_ : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(A , A , A , A ) , batch_size=A , shuffle=A , )
def A ( self : Dict , A : str , A : Union[str, Any] ) -> int:
lowercase_ : str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowercase_ : Any = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
lowercase_ : List[str] = self(**A )
lowercase_ , lowercase_ : List[str] = outputs[:2]
lowercase_ : Dict = logits.detach().cpu().numpy()
lowercase_ : Optional[Any] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : List[str] , A : Optional[Any] ) -> tuple:
lowercase_ : Tuple = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
lowercase_ : Any = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowercase_ : Union[str, Any] = np.argmax(A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowercase_ : Optional[Any] = np.squeeze(A )
lowercase_ : str = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase_ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
lowercase_ : str = [[] for _ in range(out_label_ids.shape[0] )]
lowercase_ : List[str] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , A , A )}
lowercase_ : List[str] = dict(results.items() )
lowercase_ : int = results
return ret, preds_list, out_label_list
def A ( self : str , A : list ) -> dict:
lowercase_ , lowercase_ , lowercase_ : List[Any] = self._eval_end(A )
lowercase_ : Dict = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : str , A : int ) -> dict:
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self._eval_end(A )
lowercase_ : Tuple = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( A : Optional[int] , A : Optional[Any] ) -> Optional[Any]:
BaseTransformer.add_model_specific_args(A , A )
parser.add_argument(
'''--max_seq_length''' , default=1_28 , type=A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=A , required=A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowercase ( ):
lowercase_ : str = argparse.ArgumentParser()
add_generic_args(__snake_case , os.getcwd() )
lowercase_ : List[str] = GLUETransformer.add_model_specific_args(__snake_case , os.getcwd() )
lowercase_ : Dict = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowercase_ : int = os.path.join(
'''./results''' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
lowercase_ : List[Any] = GLUETransformer(__snake_case )
lowercase_ : List[Any] = generic_train(__snake_case , __snake_case )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowercase_ : List[str] = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__snake_case ) )
lowercase_ : Dict = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__snake_case )
if __name__ == "__main__":
main()
| 141
| 1
|
import heapq
def greedy_min_vertex_cover( graph : dict ) -> set[int]:
    """Greedy APX-algorithm for minimum vertex cover: repeatedly pick the highest-degree vertex."""
    queue : list = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if the node has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ : List[str] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
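# A small trace (illustrative): on the path graph {0: [1], 1: [0, 2], 2: [1]} node 1
# has the highest degree and covers both edges, so the greedy cover is just {1}.
assert greedy_min_vertex_cover({0: [1], 1: [0, 2], 2: [1]}) == {1}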
| 387
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1_0_2_4 , max_target_length=1_0_2_4 , consider_target=False , **kwargs ):
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''train''' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=5_1_2 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['''input_ids'''].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['''labels'''].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='''val''' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
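# A minimal sketch (assuming torch is installed) of the length computation inside
# get_lens above: `input_ids.ne(pad_token_id).sum(1)` counts the non-pad tokens per row.
import torch
_batch = torch.tensor([[5, 6, 0, 0], [5, 6, 7, 0]] )  # pretend pad_token_id == 0
assert _batch.ne(0 ).sum(1 ).tolist() == [2, 3]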
| 387
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_herbert_fast"""] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
    model_type = '''vivit'''
    def __init__( self , image_size=2_24 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 79
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a__ ( __magic_name__ ):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
F"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument.")
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
@staticmethod
    def expand_attention_types_params( attention_types ):
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
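# A quick illustration of `expand_attention_types_params`: the default attention_types
# value [[["global", "local"], 12]] expands to a per-layer pattern of length 24 that
# alternates global and local attention, matching num_layers=24.
_pattern = []
for _item in [[["global", "local"], 12]]:
    for _ in range(_item[1] ):
        _pattern.extend(_item[0] )
assert len(_pattern ) == 24 and _pattern[:4] == ["global", "local", "global", "local"]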
def custom_unfold( input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
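# A minimal usage sketch (assuming torch is available): unfolding a 1-D tensor of
# length 10 into windows of size 4 with step 2 yields 4 overlapping windows.
import torch as _torch
_windows = custom_unfold(_torch.arange(10 ) , dimension=0 , size=4 , step=2 )
assert _windows.tolist() == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]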
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """Returns the largest divisor of seq_length below window_size and the resulting number of blocks."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
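# A worked example (illustrative): for seq_length=10 and window_size=4 the candidate
# divisors are [1, 2, 3]; 10 % [1, 2, 3] == [0, 0, 1], so the largest divisor is 2
# and the sequence splits into 10 // 2 == 5 blocks.
import torch as _torch
_block_len , _num_blocks = custom_get_block_length_and_num_blocks(_torch.tensor(10 ) , 4 )
assert int(_block_len ) == 2 and int(_num_blocks ) == 5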
class a__ ( __magic_name__ ):
@property
    def inputs( self ):
"""simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
return common_inputs
@property
    def num_attention_heads( self ):
"""simple docstring"""
return self._config.num_heads
def a_ ( self : Dict , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int = -1 , UpperCamelCase_ : int = -1 , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
__UpperCAmelCase : int = super(UpperCamelCase_ , self).generate_dummy_inputs(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_)
# We need to order the input in the way they appears in the forward()
__UpperCAmelCase : str = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
__UpperCAmelCase , __UpperCAmelCase : str = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCAmelCase : str = seqlen + 2
__UpperCAmelCase : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCAmelCase : Dict = [
(torch.zeros(UpperCamelCase_), torch.zeros(UpperCamelCase_)) for _ in range(self.num_layers)
]
__UpperCAmelCase : Dict = common_inputs["attention_mask"]
if self.use_past:
__UpperCAmelCase : str = ordered_inputs["attention_mask"].dtype
__UpperCAmelCase : List[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_)] , dim=1)
return ordered_inputs
@property
    def default_onnx_opset( self ):
"""simple docstring"""
return 13
| 77
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
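# Example invocation (illustrative; the dataset/language pair must exist on the Hugging Face Hub):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# This writes {train,val,test}.source and {train,val,test}.target files under save_dir.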
| 261
| 0
|
def binomial_coefficient( n , k ):
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count ):
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n ):
    if n < 0:
        raise ValueError('factorial() not defined for negative values' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count ):
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
UpperCAmelCase = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
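# A quick worked check (illustrative): 3 nodes give C(6, 3) // 4 == 5 binary search
# trees (the 3rd Catalan number) and 5 * 3! == 30 labeled binary trees.
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30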
| 565
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
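# A short illustration (general `_LazyModule` behavior, not DeiT-specific): the lazy
# module replaces this package in sys.modules, so the heavy torch/TF imports above run
# only when an attribute is first accessed, e.g.:
#   from transformers.models.deit import DeiTConfig   # resolves lazily on first access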
| 565
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase__ ( OnnxPipelineTesterMixin, unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def UpperCAmelCase__ ( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Optional[Any] =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCamelCase_ : int =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : List[Any] =self.get_dummy_inputs()
lowerCamelCase_ : Union[str, Any] =pipe(**snake_case__ ).images
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase_ : Union[str, Any] =np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : Optional[int] =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCamelCase_ : int =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int =self.get_dummy_inputs()
lowerCamelCase_ : Optional[Any] =pipe(**snake_case__ ).images
lowerCamelCase_ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase_ : Any =np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : List[Any] =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCamelCase_ : str =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Dict =self.get_dummy_inputs()
lowerCamelCase_ : Dict =pipe(**snake_case__ ).images
lowerCamelCase_ : Tuple =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase_ : Tuple =np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : int =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCamelCase_ : Optional[Any] =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Dict =self.get_dummy_inputs()
lowerCamelCase_ : str =pipe(**snake_case__ ).images
lowerCamelCase_ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase_ : Optional[int] =np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : str ):
lowerCamelCase_ : Any =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
lowerCamelCase_ : Union[str, Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Optional[int] =self.get_dummy_inputs()
lowerCamelCase_ : Tuple =pipe(**snake_case__ ).images
lowerCamelCase_ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowerCamelCase_ : List[Any] =np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : int =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Union[str, Any] =self.get_dummy_inputs()
lowerCamelCase_ : Optional[Any] =3 * [inputs["prompt"]]
# forward
lowerCamelCase_ : Optional[int] =pipe(**snake_case__ )
lowerCamelCase_ : Dict =output.images[0, -3:, -3:, -1]
lowerCamelCase_ : Any =self.get_dummy_inputs()
lowerCamelCase_ : Dict =3 * [inputs.pop("prompt" )]
lowerCamelCase_ : Union[str, Any] =pipe.tokenizer(
snake_case__ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="np" , )
lowerCamelCase_ : Any =text_inputs["input_ids"]
lowerCamelCase_ : Dict =pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
lowerCamelCase_ : Union[str, Any] =prompt_embeds
# forward
lowerCamelCase_ : Tuple =pipe(**snake_case__ )
lowerCamelCase_ : List[str] =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ : List[str] =OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : List[Any] =self.get_dummy_inputs()
lowerCamelCase_ : Dict =3 * ["this is a negative prompt"]
lowerCamelCase_ : Tuple =negative_prompt
lowerCamelCase_ : List[str] =3 * [inputs["prompt"]]
# forward
lowerCamelCase_ : Optional[Any] =pipe(**snake_case__ )
lowerCamelCase_ : Any =output.images[0, -3:, -3:, -1]
lowerCamelCase_ : str =self.get_dummy_inputs()
lowerCamelCase_ : int =3 * [inputs.pop("prompt" )]
lowerCamelCase_ : List[Any] =[]
for p in [prompt, negative_prompt]:
lowerCamelCase_ : Tuple =pipe.tokenizer(
snake_case__ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="np" , )
lowerCamelCase_ : Dict =text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
lowerCamelCase_ , lowerCamelCase_ : int =embeds
# forward
lowerCamelCase_ : str =pipe(**snake_case__ )
lowerCamelCase_ : Any =output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : List[str] =ort.SessionOptions()
lowerCamelCase_ : List[Any] =False
return options
def UpperCAmelCase__ ( self : Tuple ):
# using the PNDM scheduler by default
lowerCamelCase_ : Optional[Any] =OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : List[Any] ="A painting of a squirrel eating a burger"
np.random.seed(0 )
lowerCamelCase_ : Tuple =sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
lowerCamelCase_ : Union[str, Any] =output.images
lowerCamelCase_ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : List[str] =np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : List[Any] =DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
lowerCamelCase_ : List[str] =OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : int ="open neural network exchange"
lowerCamelCase_ : List[Any] =np.random.RandomState(0 )
lowerCamelCase_ : str =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type="np" )
lowerCamelCase_ : Optional[int] =output.images
lowerCamelCase_ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Any =np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : str =LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
lowerCamelCase_ : Any =OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Union[str, Any] ="open neural network exchange"
lowerCamelCase_ : str =np.random.RandomState(0 )
lowerCamelCase_ : Optional[int] =sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case__ , output_type="np" )
lowerCamelCase_ : Dict =output.images
lowerCamelCase_ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ : Dict =np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
lowerCamelCase_ : Any =0
def test_callback_fn(snake_case__ : int , snake_case__ : int , snake_case__ : np.ndarray ) -> None:
lowerCamelCase_ : Optional[int] =True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ : List[str] =latents[0, -3:, -3:, -1]
lowerCamelCase_ : Union[str, Any] =np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ : Optional[Any] =latents[0, -3:, -3:, -1]
lowerCamelCase_ : List[Any] =np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
lowerCamelCase_ : Any =False
lowerCamelCase_ : int =OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
lowerCamelCase_ : Dict ="Andromeda galaxy in a bottle"
lowerCamelCase_ : Union[str, Any] =np.random.RandomState(0 )
pipe(
prompt=snake_case__ , num_inference_steps=5 , guidance_scale=7.5 , generator=snake_case__ , callback=snake_case__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : List[str] =OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(snake_case__ , snake_case__ )
assert pipe.safety_checker is None
lowerCamelCase_ : Tuple =pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowerCamelCase_ : str =OnnxStableDiffusionPipeline.from_pretrained(snake_case__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCamelCase_ : Any =pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
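# Usage sketch for the pipeline exercised by the tests above (illustrative;
# kept commented out because it downloads a checkpoint at runtime):
#
#     pipe = OnnxStableDiffusionPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#         provider="CPUExecutionProvider",
#     )
#     image = pipe(
#         "A painting of a squirrel eating a burger",
#         num_inference_steps=2,
#         output_type="np",
#     ).images[0]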
| 153 |
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 153 | 1 |
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
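
# Quick sanity check for the sieve above (illustrative; relies only on the
# function defined in this snippet):
assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]
assert prime_sieve_eratosthenes(2) == [2]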
| 599 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class __UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 599 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class _UpperCAmelCase ( _lowerCamelCase ):
a = '''time_series_transformer'''
a = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , a__ = None , a__ = None , a__ = "student_t" , a__ = "nll" , a__ = 1 , a__ = [1, 2, 3, 4, 5, 6, 7] , a__ = "mean" , a__ = 0 , a__ = 0 , a__ = 0 , a__ = 0 , a__ = None , a__ = None , a__ = 32 , a__ = 32 , a__ = 2 , a__ = 2 , a__ = 2 , a__ = 2 , a__ = True , a__ = "gelu" , a__ = 64 , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 0.1 , a__ = 100 , a__ = 0.02 , a__=True , **a__ , ):
# time series specific configuration
A_ : Dict = prediction_length
A_ : Optional[int] = context_length or prediction_length
A_ : Tuple = distribution_output
A_ : Any = loss
A_ : str = input_size
A_ : int = num_time_features
A_ : Any = lags_sequence
A_ : List[Any] = scaling
A_ : int = num_dynamic_real_features
A_ : List[str] = num_static_real_features
A_ : Any = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(a__ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A_ : int = cardinality
else:
A_ : List[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(a__ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A_ : Optional[int] = embedding_dimension
else:
A_ : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A_ : Tuple = num_parallel_samples
# Transformer architecture configuration
A_ : List[str] = input_size * len(a__ ) + self._number_of_features
A_ : Any = d_model
A_ : List[Any] = encoder_attention_heads
A_ : Union[str, Any] = decoder_attention_heads
A_ : Any = encoder_ffn_dim
A_ : Union[str, Any] = decoder_ffn_dim
A_ : str = encoder_layers
A_ : Union[str, Any] = decoder_layers
A_ : List[str] = dropout
A_ : Optional[int] = attention_dropout
A_ : int = activation_dropout
A_ : Optional[int] = encoder_layerdrop
A_ : List[Any] = decoder_layerdrop
A_ : Optional[int] = activation_function
A_ : Any = init_std
A_ : Optional[Any] = use_cache
super().__init__(is_encoder_decoder=a__ , **a__ )
@property
def _lowerCamelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
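# Worked example for the feature count above (illustrative values, not from
# the original file): with embedding_dimension=[2, 3],
# num_dynamic_real_features=1, num_time_features=2,
# num_static_real_features=0 and input_size=1, the property yields
# (2 + 3) + 1 + 2 + 0 + 1 * 2 = 10 extra features (the final term is the
# log1p(abs(loc)) and log(scale) pair per input dimension).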
| 569 |
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCamelCase ):
a = '''bert-generation'''
def __init__( self , a__=50358 , a__=1024 , a__=24 , a__=16 , a__=4096 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.02 , a__=1E-12 , a__=0 , a__=2 , a__=1 , a__="absolute" , a__=True , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
A_ : List[str] = vocab_size
A_ : int = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Optional[int] = intermediate_size
A_ : List[Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : Optional[Any] = initializer_range
A_ : str = layer_norm_eps
A_ : str = position_embedding_type
A_ : List[Any] = use_cache
| 569 | 1 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | "
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
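
# Worked example (illustrative): "5 6 7 * + 2 -" evaluates as
# 6 * 7 = 42, then 5 + 42 = 47, then 47 - 2 = 45.
assert solve("5 6 7 * + 2 -".split(" ")) == 45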
| 704 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
_UpperCamelCase : Optional[int] = datasets.logging.get_logger(__name__)
_UpperCamelCase : Dict = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_UpperCamelCase : Tuple = """\
BLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning, starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_UpperCamelCase : Union[str, Any] = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
_UpperCamelCase : str = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _lowerCAmelCase( datasets.Metric):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase )-> Any:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
__A = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
__A = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__A = self.config_name.upper()
else:
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
# download the model checkpoint specified by self.config_name and set up the scorer
__A = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
__A = score.BleurtScorer(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase )-> List[str]:
__A = self.scorer.score(references=UpperCAmelCase , candidates=UpperCAmelCase )
return {"scores": scores}
| 341 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class __A ( UpperCamelCase__ ):
def __init__(self : str , __a : Tuple , __a : str , __a : Tuple=1024 , __a : Dict=1024 , __a : int=3.6 ):
UpperCAmelCase_ = tokenizer
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = dataset
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = seq_length * chars_per_token * num_of_sequences
def __iter__(self : str ):
UpperCAmelCase_ = iter(self.dataset )
UpperCAmelCase_ = True
while more_examples:
UpperCAmelCase_ , UpperCAmelCase_ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(UpperCAmelCase__ )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
UpperCAmelCase_ = False
break
UpperCAmelCase_ = tokenizer(UpperCAmelCase__ , truncation=UpperCAmelCase__ )["input_ids"]
UpperCAmelCase_ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(UpperCAmelCase__ ) , self.seq_length ):
UpperCAmelCase_ = all_token_ids[i : i + self.seq_length]
if len(UpperCAmelCase__ ) == self.seq_length:
yield torch.tensor(UpperCAmelCase__ )
def lowerCAmelCase_ ( snake_case_ : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = {"streaming": True}
UpperCAmelCase_ = load_dataset(args.dataset_name , split="train" , **lowerCAmelCase_ )
UpperCAmelCase_ = ConstantLengthDataset(lowerCAmelCase_ , lowerCAmelCase_ , seq_length=args.seq_length )
UpperCAmelCase_ = DataLoader(lowerCAmelCase_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase_ ( snake_case_ : Dict ) -> str:
'''simple docstring'''
model.eval()
UpperCAmelCase_ = []
for step, batch in enumerate(lowerCAmelCase_ ):
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
UpperCAmelCase_ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(lowerCAmelCase_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
UpperCAmelCase_ = torch.mean(torch.cat(lowerCAmelCase_ ) )
try:
UpperCAmelCase_ = torch.exp(lowerCAmelCase_ )
except OverflowError:
UpperCAmelCase_ = float("inf" )
return loss.item(), perplexity.item()
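# Worked example (illustrative): a mean eval loss of 2.0 corresponds to a
# perplexity of exp(2.0) ~= 7.39; an OverflowError from torch.exp is mapped
# to infinity above.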
# Setup Accelerator
SCREAMING_SNAKE_CASE_: Tuple =Accelerator()
# Parse configuration
SCREAMING_SNAKE_CASE_: Tuple =HfArgumentParser(EvaluationArguments)
SCREAMING_SNAKE_CASE_: Optional[Any] =parser.parse_args()
set_seed(args.seed)
# Logging
SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoModelForCausalLM.from_pretrained(args.model_ckpt)
SCREAMING_SNAKE_CASE_: int =AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
SCREAMING_SNAKE_CASE_: List[Any] =create_dataloader(args)
# Prepare everything with our `accelerator`.
SCREAMING_SNAKE_CASE_: Tuple =accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
SCREAMING_SNAKE_CASE_: Union[str, Any] =evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 78 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1.5
__SCREAMING_SNAKE_CASE = int(factor * num_class_images )
__SCREAMING_SNAKE_CASE = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
__SCREAMING_SNAKE_CASE = client.query(text=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__SCREAMING_SNAKE_CASE = int(factor * num_images )
__SCREAMING_SNAKE_CASE = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase_ )
with open(f"""{class_data_dir}/caption.txt""" , "w" ) as f1, open(f"""{class_data_dir}/urls.txt""" , "w" ) as f2, open(
f"""{class_data_dir}/images.txt""" , "w" ) as f3:
while total < num_class_images:
__SCREAMING_SNAKE_CASE = class_images[count]
count += 1
try:
__SCREAMING_SNAKE_CASE = requests.get(images["url"] )
if img.status_code == 200:
__SCREAMING_SNAKE_CASE = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
f.write(img.content )
f1.write(images["caption"] + "\n" )
f2.write(images["url"] + "\n" )
f3.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser("" , add_help=lowerCAmelCase_ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=lowerCAmelCase_ )
return parser.parse_args()
if __name__ == "__main__":
a__ : Optional[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 682 | 0 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next

    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
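
# The three checkers above assume a singly linked node with `val`/`next`
# attributes; no node class ships with this snippet. A minimal sketch
# (ListNode and build_list are hypothetical helpers, not part of the original):
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node


def build_list(values):
    head = None
    for value in reversed(values):
        head = ListNode(value, head)
    return head


assert is_palindrome(build_list([1, 2, 2, 1])) is True
assert is_palindrome_stack(build_list([1, 2, 3])) is False
assert is_palindrome_dict(build_list([1, 2, 1])) is True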
| 716 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __UpperCamelCase( _A : Union[str, Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __UpperCamelCase( _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __UpperCamelCase( _A : List[Any] , _A : Dict , _A : str , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = '''imagenet-1k-id2label.json'''
UpperCAmelCase__ : str = 10_00
UpperCAmelCase__ : str = '''huggingface/label-files'''
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : Tuple = json.load(open(cached_download(hf_hub_url(_A , _A , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase__ : Any = {int(k): v for k, v in idalabel.items()}
UpperCAmelCase__ : List[str] = idalabel
UpperCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : int = CvtConfig(num_labels=_A , idalabel=_A , labelaid=_A )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
UpperCAmelCase__ : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
UpperCAmelCase__ : List[str] = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
UpperCAmelCase__ : Optional[int] = [2, 2, 20]
UpperCAmelCase__ : str = [3, 12, 16]
UpperCAmelCase__ : Union[str, Any] = [1_92, 7_68, 10_24]
UpperCAmelCase__ : Optional[int] = CvtForImageClassification(_A )
UpperCAmelCase__ : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
UpperCAmelCase__ : Dict = image_size
UpperCAmelCase__ : Union[str, Any] = torch.load(_A , map_location=torch.device('''cpu''' ) )
UpperCAmelCase__ : Union[str, Any] = OrderedDict()
UpperCAmelCase__ : str = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCAmelCase__ : Optional[int] = list_of_state_dict + cls_token(_A )
UpperCAmelCase__ : Union[str, Any] = list_of_state_dict + embeddings(_A )
for cnt in range(config.depth[idx] ):
UpperCAmelCase__ : str = list_of_state_dict + attention(_A , _A )
UpperCAmelCase__ : int = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_A )
for i in range(len(_A ) ):
UpperCAmelCase__ : List[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_A )
model.save_pretrained(_A )
image_processor.save_pretrained(_A )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ : Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 496 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."""
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
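
# Usage sketch (illustrative): build a tiny dummy dataset with a single string
# column; the path and column name here are made up for the example.
if __name__ == "__main__":
    import os
    import tempfile

    features = datasets.Features({"text": datasets.Value("string")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        ds = generate_example_dataset(os.path.join(tmp_dir, "dummy.arrow"), features, num_examples=10)
        assert len(ds) == 10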
| 17 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCamelCase :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : str=32 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=3 , UpperCAmelCase__ : List[str]=16 , UpperCAmelCase__ : str=[1, 2, 1] , UpperCAmelCase__ : Union[str, Any]=[2, 2, 4] , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : List[str]=2.0 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : int=0.0 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : List[Any]="gelu" , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[str]=0.0_2 , UpperCAmelCase__ : List[str]=1E-5 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=10 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCAmelCase__ : Union[str, Any]=[1, 2, 3] , ) -> str:
_a : Union[str, Any] = parent
_a : str = batch_size
_a : int = image_size
_a : Optional[Any] = patch_size
_a : Tuple = num_channels
_a : str = embed_dim
_a : int = depths
_a : List[Any] = num_heads
_a : int = window_size
_a : Optional[int] = mlp_ratio
_a : Optional[int] = qkv_bias
_a : Dict = hidden_dropout_prob
_a : Any = attention_probs_dropout_prob
_a : Tuple = drop_path_rate
_a : List[Any] = hidden_act
_a : List[Any] = use_absolute_embeddings
_a : Optional[Any] = patch_norm
_a : Dict = layer_norm_eps
_a : Dict = initializer_range
_a : Union[str, Any] = is_training
_a : List[str] = scope
_a : Any = use_labels
_a : Any = type_sequence_label_size
_a : Dict = encoder_stride
_a : Optional[int] = out_features
_a : Any = out_indices
def _lowercase ( self : Optional[Any] ) -> int:
_a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Tuple = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Any = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Optional[int] ) -> List[str]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple ) -> List[str]:
_a : int = MaskFormerSwinModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Any = model(UpperCAmelCase__ )
_a : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_a : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ) -> List[Any]:
_a : Optional[Any] = MaskFormerSwinBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
_a : Union[str, Any] = model(UpperCAmelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCAmelCase__ ):
_a : Dict = ["""stem"""]
_a : Optional[int] = MaskFormerSwinBackbone(config=UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ) -> Tuple:
_a : Optional[int] = self.prepare_config_and_inputs()
_a , _a , _a : str = config_and_inputs
_a : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase : Optional[Any] = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
UpperCamelCase : str = False
UpperCamelCase : Tuple = False
UpperCamelCase : Optional[int] = False
def _lowercase ( self : Any ) -> int:
_a : Optional[int] = MaskFormerSwinModelTester(self )
_a : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : int ) -> str:
pass
def _lowercase ( self : Dict ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Tuple ) -> Union[str, Any]:
return
def _lowercase ( self : List[Any] ) -> Optional[Any]:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : str ) -> Tuple:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Dict ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : Optional[Any] ) -> Dict:
pass
def _lowercase ( self : int ) -> Union[str, Any]:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def _lowercase ( self : Any ) -> Tuple:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(UpperCAmelCase__ )
_a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Optional[Any] ) -> int:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : Any ) -> List[Any]:
pass
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]:
_a : Optional[Any] = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
_a : Dict = outputs.hidden_states
_a : Optional[int] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
# Swin has a different seq_length
_a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : str ) -> Dict:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_a : Optional[Any] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : str = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Tuple = 3
_a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_a : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_a : Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_a : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_a : Optional[int] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Any ) -> Any:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # nan != nan, so this mask selects exactly the NaN entries
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}."
                                f" Dict has `nan`: {torch.isnan(dict_object).any()} and `inf`:"
                                f" {torch.isinf(dict_object).any()}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
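    # Rationale for the test above: ModelOutput dataclasses and plain tuples must stay
    # interchangeable, so every model runs once with return_dict=False and once with
    # return_dict=True, and the two output trees are compared leaf by leaf.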
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__A ={"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
__A =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
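# Hypothetical usage of the lazy module defined above: the submodule is only imported
# on first attribute access, which keeps `import transformers` cheap. (The checkpoint
# name below is illustrative, not taken from this file.)
#
#   from transformers import Wav2Vec2ProcessorWithLM
#   processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")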
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # TRANSFORMERS_OFFLINE can only be changed before `transformers` is loaded, which is
        # too late inside pytest - so each scenario runs in a fresh child interpreter.
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments run in a child interpreter
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
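# For reference, the network-kill trick the tests above inject into each child
# interpreter, shown standalone (don't execute this at import time in a real module):
#
#   import socket
#
#   def offline_socket(*args, **kwargs):
#       raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
#
#   socket.socket = offline_socket  # every subsequent hub download now fails fast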
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        # build the mask from the pad token: 1 for real tokens, 0 for padding
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
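# For context, a generated dummy file like the one above is what ships when a backend
# is missing; touching one of its classes fails loudly (a sketch, assuming diffusers'
# DummyObject/requires_backends semantics):
#
#   class FakeClass(metaclass=DummyObject):
#       _backends = ["torch"]
#
#   FakeClass()  # -> ImportError-style message asking the user to install torch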
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for the given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
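    # A worked example (coefficients chosen for illustration, not from the source):
    # solve 4x + y = 2 and x + 3y = -1; the exact solution is x = 7/11 ≈ 0.6364,
    # y = -6/11 ≈ -0.5455, and the system is strictly diagonally dominant.
    coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
    constants = np.array([[2.0], [-1.0]])
    print(jacobi_iteration_method(coefficients, constants, init_val=[0, 0], iterations=25))
    # -> approximately [0.6364, -0.5455]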
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=checkpoint_repo, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
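    # Example CLI invocation (repo id taken from the help text above; the script name
    # and output path are illustrative):
    #   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
    #       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
    #       --pytorch_dump_folder_path ./roberta_prelayernorm_converted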
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def snake_case__ ( a , a ) -> Optional[Any]:
'''simple docstring'''
snake_case__ = Mock()
snake_case__ = conn, Mock()
snake_case__ = iter([1, None] )
snake_case__ = lambda a : next(_lowercase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowercase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __lowerCAmelCase( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ = {"""input_ids""": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `snake_case__` is the large expected-encoding fixture defined just above
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and direction into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether a system of forces is in static equilibrium (net moment near zero)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase_ : List[str] = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
UpperCAmelCase_ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase_ : str = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
UpperCAmelCase_ : Optional[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase_ : List[Any] = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
UpperCAmelCase_ : int = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
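# Note (an aside, not from the source file): these max input sizes are larger than the
# usual 512 for the base checkpoints; RoFormer's rotary position embeddings let the
# Chinese checkpoints advertise 1536-token contexts.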
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled, swap in a picklable one
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def lowerCamelCase_ ( self :str , _lowerCamelCase :List[str] , _lowerCamelCase :List[Any]=None , _lowerCamelCase :Optional[Any]=None , _lowerCamelCase :int=False , **_lowerCamelCase :Optional[int] , ):
'''simple docstring'''
UpperCamelCase_ : str =BertPreTokenizer()
return super().save_pretrained(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
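# Usage sketch (added for illustration; the checkpoint id comes from the maps
# above, the expected tokens mirror the class docstring in transformers):
# >>> tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# >>> tokenizer.tokenize("今天天气非常好。")
# ['今', '天', '天', '气', '非常', '好', '。']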
| 357
| 0
|
'''simple docstring'''
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: check every pair among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """In the strip, each point only needs to be compared with up to 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
UpperCAmelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 555
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
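# A minimal sketch (added for illustration, not part of transformers) of the
# idea behind `_LazyModule`: the module object resolves attributes on first
# access, so importing the package stays cheap until a symbol is actually used.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name: str):
        # import the defining submodule lazily, then delegate the lookup
        submodule = importlib.import_module(f".{self._symbol_to_module[name]}", self.__name__)
        return getattr(submodule, name)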
| 555
| 1
|
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs: in two's complement
    the XOR of two integers is negative exactly when their sign bits differ."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
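# Worked examples (added for illustration): the sign lives in the top bit,
# so the XOR is negative only when exactly one operand is negative.
assert different_signs(1, -1) is True
assert different_signs(-10, -4) is False
assert different_signs(7, 3) is False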
| 12
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoints,
    # so they are excluded from the parameter-sum sanity check below
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ : List[str] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
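# Example invocation (hypothetical script name and paths, for illustration
# only; --config_path is optional and falls back to a default FlavaConfig()):
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava_full.pt \
#       --codebook_path flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf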
| 12
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowercase (snake_case__ : Any=32 , snake_case__ : int=10 , snake_case__ : Optional[int]=100 , snake_case__ : Optional[int]=1_026 , snake_case__ : Union[str, Any]=True , snake_case__ : Any="data/tokenized_stories_train_wikitext103.jbl" , snake_case__ : Union[str, Any]="igf_context_pairs.jbl" , ) -> Union[str, Any]:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
lowerCAmelCase , lowerCAmelCase = generate_datasets(
snake_case__ , snake_case__ , number=snake_case__ , min_len=1_026 , trim=snake_case__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
lowerCAmelCase = load_gpta("""gpt2""" ).to(snake_case__ )
print("""computing perplexity on objective set""" )
lowerCAmelCase = compute_perplexity(snake_case__ , snake_case__ , snake_case__ ).item()
print("""perplexity on objective set:""" , snake_case__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowercase (snake_case__ : List[Any] , snake_case__ : List[str]=15 , snake_case__ : str=128 , snake_case__ : int=100 , snake_case__ : Optional[int]="igf_model.pt" , ) -> Dict:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
lowerCAmelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
lowerCAmelCase = SecondaryLearner(snake_case__ )
# Train secondary learner
lowerCAmelCase = train_secondary_learner(
snake_case__ , snake_case__ , max_epochs=snake_case__ , batch_size=snake_case__ , eval_freq=100 , igf_model_path=snake_case__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowercase (snake_case__ : int , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : List[str]=32 , snake_case__ : str=1_000 , snake_case__ : str=16 , snake_case__ : Dict=1.0 , snake_case__ : List[str]=recopy_gpta , snake_case__ : Optional[int]=None , snake_case__ : int=10 , snake_case__ : Dict="gpt2_finetuned.pt" , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
lowerCAmelCase = RandomSampler(snake_case__ )
lowerCAmelCase = DataLoader(snake_case__ , sampler=snake_case__ )
lowerCAmelCase = max_steps // (len(snake_case__ )) + 1
lowerCAmelCase = 0
lowerCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case__ )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = recopy_model(snake_case__ , snake_case__ , snake_case__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case__ )
secondary_learner.eval()
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = []
lowerCAmelCase = []
# Compute the performance of the transformer model at the beginning
lowerCAmelCase = compute_perplexity(snake_case__ , snake_case__ , snake_case__ )
test_perps.append(snake_case__ )
print("""Test perplexity, step""" , snake_case__ , """:""" , snake_case__ )
for epoch in range(int(snake_case__ ) ):
for step, example in enumerate(snake_case__ ):
torch.cuda.empty_cache()
lowerCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
lowerCAmelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowerCAmelCase = model(snake_case__ , labels=snake_case__ )
lowerCAmelCase = True
if secondary_learner is not None:
lowerCAmelCase = secondary_learner.forward(
torch.tensor(snake_case__ , dtype=torch.long , device=snake_case__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowerCAmelCase = -1
if predicted_q < threshold:
lowerCAmelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowerCAmelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowerCAmelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowerCAmelCase = compute_perplexity(snake_case__ , snake_case__ , snake_case__ )
test_perps.append(snake_case__ )
print("""Test perplexity, step""" , snake_case__ , """:""" , snake_case__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowercase () -> str:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=snake_case__ , default=snake_case__ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=snake_case__ , default=snake_case__ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=snake_case__ , type=snake_case__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=snake_case__ , default=snake_case__ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=snake_case__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=snake_case__ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=snake_case__ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_000 , type=snake_case__ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=snake_case__ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=snake_case__ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=snake_case__ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=snake_case__ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_026 , type=snake_case__ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=snake_case__ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=snake_case__ , type=snake_case__ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=snake_case__ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=snake_case__ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=snake_case__ , type=snake_case__ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=snake_case__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
lowerCAmelCase = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
lowerCAmelCase = training_secondary_learner(
snake_case__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
lowerCAmelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowerCAmelCase , lowerCAmelCase = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=snake_case__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case__ , snake_case__ , snake_case__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=snake_case__ , secondary_learner=snake_case__ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
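# The comments above describe decaying the secondary-learner filter from
# +1 to -1 standard deviations of the predicted IG(X) over the first 10
# batches. A minimal sketch of such a schedule (illustrative only; the script
# itself hard-codes the endpoints and flips the threshold at step 10):
def igf_threshold(step: int, mean_q: float, std_q: float, warmup: int = 10) -> float:
    # linearly interpolate from mean + std down to mean - std over `warmup` steps
    frac = min(step, warmup) / warmup
    return mean_q + (1 - 2 * frac) * std_q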
| 718
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
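# Typical usage (illustrative): answer the interactive prompts with
#   accelerate config
# or write the result to an explicit location instead of the cache default:
#   accelerate config --config_file ./my_default_config.yaml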
| 529
| 0
|
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    return [list(range(10_00 - i, -10_00 - i, -1)) for i in range(10_00)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # every row and every column must be sorted in decreasing order
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_00)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
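# Cross-check (added for illustration): all three counting strategies must
# agree on the small hand-written grids; the 1000x1000 grid is skipped so
# this stays instant when pasted into a REPL.
for _g in test_grids[:-1]:
    validate_grid(_g)
    assert (
        count_negatives_binary_search(_g)
        == count_negatives_brute_force(_g)
        == count_negatives_brute_force_with_break(_g)
    )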
| 357
|
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array):
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3
        )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self):
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self):
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss):
        for iteration in range(1, iterations + 1):
            # store the fresh prediction so back_propagation uses it
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr):
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # expects the sigmoid *activation*, not the raw pre-activation
    return (value) * (1 - (value))


def example() -> int:
    # Input values (three-bit truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
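# Quick numerical sanity check (added for illustration): for s = sigmoid(x),
# ds/dx = s * (1 - s), which is exactly what sigmoid_derivative computes on
# the activation. Compare against a central finite difference:
_x = numpy.linspace(-3, 3, 7)
_eps = 1e-6
_analytic = sigmoid_derivative(sigmoid(_x))
_numeric = (sigmoid(_x + _eps) - sigmoid(_x - _eps)) / (2 * _eps)
assert numpy.allclose(_analytic, _numeric, atol=1e-6)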
| 394
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
"""simple docstring"""
snake_case__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
snake_case__ = field(default=UpperCamelCase__ , metadata={"help": "Whether tp freeze the encoder."} )
snake_case__ = field(default=UpperCamelCase__ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class __lowerCamelCase :
"""simple docstring"""
snake_case__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
snake_case__ = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
snake_case__ = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
snake_case__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case__ = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
snake_case__ = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
snake_case__ = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
snake_case__ = field(default=UpperCamelCase__ , metadata={"help": "Source language id for translation."} )
snake_case__ = field(default=UpperCamelCase__ , metadata={"help": "Target language id for translation."} )
snake_case__ = field(default=UpperCamelCase__ , metadata={"help": "# num_beams to use for evaluation."} )
snake_case__ = field(
default=UpperCamelCase__ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics. `split` is one of train/val/test.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def _A ( ):
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
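# Illustrative launch (paths and model id are placeholders, not from this
# file); HfArgumentParser maps every dataclass field above to a --flag:
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./out --do_train --do_eval --task summarization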
| 721
|
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """
    Return True if `string` can be segmented into a sequence of one or more
    words from `words`, using a trie plus memoized dynamic programming.
    """
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
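# Worked examples (added for illustration):
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False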
| 125
| 0
|
"""simple docstring"""
import os
def _lowerCamelCase ( _UpperCamelCase = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(_UpperCamelCase ) , _UpperCamelCase ) ) as input_file:
__lowerCAmelCase = [
[int(_UpperCamelCase ) for element in line.split("," )]
for line in input_file.readlines()
]
__lowerCAmelCase = len(_UpperCamelCase )
__lowerCAmelCase = len(matrix[0] )
__lowerCAmelCase = [[-1 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
__lowerCAmelCase = matrix[i][0]
for j in range(1 , _UpperCamelCase ):
for i in range(_UpperCamelCase ):
__lowerCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , _UpperCamelCase ):
__lowerCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__lowerCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
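# Worked example (added for illustration): the same three-direction DP on the
# 5x5 sample matrix from Project Euler problem 82 gives 994, via the path
# 201 -> 96 -> 342 -> 234 -> 103 -> 18.
def _minimal_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    sums = [[-1] * cols for _ in range(rows)]
    for i in range(rows):
        sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)


_example = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert _minimal_path_sum(_example) == 994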
| 636
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
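# Minimal usage sketch (added for illustration; it mirrors the slow test above):
# from transformers import pipeline
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# vqa(image="cats.png", question="How many cats are there?", top_k=2)
# -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]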
| 636
| 1
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs an MCTCT processor which wraps an MCTCT feature extractor and a tokenizer into a single processor.
    """
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
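# Usage sketch (added for illustration; the variable names are assumptions,
# not from this file). A single call handles audio, text, or both; with both
# present, the text encodings come back attached as CTC labels:
# processor = MCTCTProcessor(feature_extractor, tokenizer)
# batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
# batch["input_features"], batch["labels"]  # audio features + label token ids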
| 719
|
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
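# Example (added for illustration):
#   transformers-cli download bert-base-uncased --cache-dir ./models
# which is equivalent to
#   DownloadCommand("bert-base-uncased", "./models", False, False).run()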
| 22
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class _UpperCAmelCase :
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=99 , _A=[1, 1, 2] , _A=1 , _A=32 , _A=4 , _A=8 , _A=37 , _A="gelu_new" , _A=0.1 , _A=0.1 , _A=0.0 , _A=5_12 , _A=3 , _A=0.02 , _A=3 , _A=4 , _A=None , _A=False , ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : List[Any] = use_input_mask
_UpperCAmelCase : Optional[int] = use_token_type_ids
_UpperCAmelCase : List[str] = use_labels
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Optional[Any] = block_sizes
_UpperCAmelCase : Optional[Any] = num_decoder_layers
_UpperCAmelCase : List[str] = d_model
_UpperCAmelCase : List[str] = n_head
_UpperCAmelCase : Any = d_head
_UpperCAmelCase : Optional[int] = d_inner
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = hidden_dropout
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : List[str] = activation_dropout
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[Any] = num_labels
_UpperCAmelCase : List[str] = num_choices
_UpperCAmelCase : Dict = scope
_UpperCAmelCase : Optional[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
_UpperCAmelCase : Any = n_head
# Used in the tests to check the size of the first hidden state
_UpperCAmelCase : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_UpperCAmelCase : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_UpperCAmelCase : str = self.num_hidden_layers + 2
def __snake_case ( self ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : int = None
if self.use_input_mask:
_UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : str = None
if self.use_token_type_ids:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Dict = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : List[Any] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
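    # Note (illustrative): the base model skips the decoder, so the funnel
    # pooling shortens the returned sequence; the expected lengths above (2,
    # then 3 once truncate_seq is disabled, then 2 again without separate_cls)
    # encode how those two config flags change the pooled length.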
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
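# A minimal sketch of how these suites are usually driven (the test-file path
# is an assumption based on the standard transformers layout):
#
#   pytest tests/models/funnel/test_modeling_tf_funnel.py -k "test_model"
#
# pytest discovers the test_* methods; each one builds a fresh
# config-and-inputs tuple via TFFunnelModelTester and hands it to the matching
# create_and_check_* shape assertion.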
| 238
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
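# A minimal sketch of what the lazy pattern above buys (hypothetical caller;
# the exact module path is an assumption): importing the package only records
# names in _import_structure, and the torch-backed classes are materialised on
# first attribute access via _LazyModule.__getattr__.
#
#   import transformers.models.mmbt as mmbt  # cheap: no torch import yet
#   config = mmbt.MMBTConfig()               # first access triggers the real import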
| 200
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
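# Sketch of why the TYPE_CHECKING branch exists (illustrative only): static
# type checkers follow the real imports above, while at runtime the module is
# replaced by a _LazyModule, so annotations stay resolvable without eagerly
# importing torch.
#
#   if TYPE_CHECKING:
#       from .configuration_swiftformer import SwiftFormerConfig
#
#   def make_config() -> "SwiftFormerConfig":  # checked statically, imported lazily
#       ...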
| 721
|
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 462
| 0
|