| code (string, 86-54.5k chars) | code_codestyle (int64, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
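
# A minimal sketch (not part of the metric above) of the per-sentence GLEU that the
# module description defines: record all 1..4-gram counts, take matching/total as
# precision and recall, and return their minimum. The helper name is illustrative.
from collections import Counter


def sentence_gleu_sketch(hypothesis: list, reference: list, min_len: int = 1, max_len: int = 4) -> float:
    def ngram_counts(tokens: list) -> Counter:
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngram_counts(hypothesis), ngram_counts(reference)
    overlap = sum((hyp_counts & ref_counts).values())  # matching n-grams (multiset intersection)
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)  # symmetric when switching hypothesis and reference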
| 173
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in `_import_structure` and in the
    `TYPE_CHECKING` block. Returns None for a traditional (non-lazy) init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check that all submodules are registered in the main init of Transformers."""
    # This makes sure the transformers module inspected is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by swapping two randomly chosen positions per pass."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
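
# Note: the shuffle above swaps two independently random positions per pass, which
# is simpler than (and not identical in distribution to) the textbook algorithm.
# A sketch of the canonical Fisher-Yates (Durstenfeld) variant for comparison:
import random


def fisher_yates_shuffle_canonical(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # draw only from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data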
| 368
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
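
# Usage note: once the lazy module replaces this package in sys.modules, each key of
# `_import_structure` is imported only when one of its names is first accessed, so
# `UniSpeechConfig` can be used without paying for the torch-backed modeling module.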
| 323
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 33
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
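
# Example: invoked as `python ./utils/get_modified_files.py utils src tests examples`,
# a branch diff touching src/transformers/models/bert/modeling_bert.py and README.md
# prints only the former, since the regex anchors on the given dirs and a .py suffix.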
| 175
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/tweak the old checkpoint's weights into the new ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 175
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class names were mangled in this dump; the placeholder names
# below are assumptions. The dummy-object pattern itself (raise unless flax and
# transformers are installed) and the usual from_config/from_pretrained hooks are standard.
class FlaxTransformersDummyObjectA(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectB(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectC(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxTransformersDummyObjectD(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 344
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
# Reconstructed names: the OPENAI_CLIP_MEAN/STD defaults identify this as a
# CLIP-style image processor; the class name is inferred on that basis.
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 106
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 361
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# Reconstructed names: this "rescale then symmetrically pad to a multiple of
# pad_size" processor matches the Swin2SR pattern; the class name is an inference.
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
                 do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int,
            data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Pad the bottom and right edges so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None,
                   pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
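
# Worked example for pad(): with old_height = 250 and size = 8, pad_height is
# (250 // 8 + 1) * 8 - 250 = 6, giving an output height of 256. Note the formula
# always pads at least one row/column: an already-aligned 256 becomes 264.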
| 53
| 0
|
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series."""
    # Reduce the angle to the range [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
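
# Quick sanity sketch (assumes the maclaurin_sin defined above): the truncated
# series should agree with math.sin to within the 10-decimal rounding.
from math import sin as reference_sin

assert maclaurin_sin(30.0) == round(0.5, 10)
assert abs(maclaurin_sin(45.0) - reference_sin(radians(45.0))) < 1e-9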
| 259
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function` on `num_processes` workers from a notebook (TPU, multi-GPU, or a single device)."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # Assumed reconstruction: enable the MPS fallback before launching on Apple silicon.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Run `function` on `num_processes` CPU workers, mainly for debugging distributed code."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
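
# Usage sketch (`training_loop` below is a hypothetical user function, not accelerate API):
# def training_loop(mixed_precision="fp16"):
#     accelerator = Accelerator(mixed_precision=mixed_precision)  # created inside the function
#     ...
# notebook_launcher(training_loop, args=("fp16",), num_processes=2)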
| 259
| 1
|
'''simple docstring'''
def binomial_coefficient(n, r):
    """Compute C(n, r) with a rolling one-dimensional Pascal's triangle row."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
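
# Sanity sketch: the rolling-row DP above matches the closed form n! / (r! * (n - r)!).
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252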
| 371
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
snake_case_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 236
| 0
|
from __future__ import annotations
import requests
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    """Fetch `limit` posts from a subreddit, optionally restricted to the fields in `wanted_data`."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 18
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    """Configuration class for an EfficientNet model."""

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
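# Added illustration (hedged; EfficientNetConfig is the class defined above): with the
# default num_block_repeats, the derived layer count is sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64.
# config = EfficientNetConfig(image_size=600, width_coefficient=2.0)
# assert config.num_hidden_layers == 64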
| 323
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args)
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
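# Added note (illustrative, not part of the module): with the _LazyModule pattern above,
# a name is only resolved — and its heavy submodule imported — on first access, e.g.
# `from transformers.models.xlm_roberta import XLMRobertaConfig` stays cheap, while
# `from transformers.models.xlm_roberta import XLMRobertaModel` pulls in the torch-backed code.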
| 247
| 0
|
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
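# Added context (hedged, not part of the test file): outside these unit tests, a retriever
# is usually constructed from a released checkpoint, e.g.
# retriever = RagRetriever.from_pretrained("facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True)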
| 175
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 175
| 1
|
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoint weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
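# Added usage sketch (hedged; the model class and checkpoint file name are placeholders):
# pt_model = MyPyTorchModel(config)
# pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")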
| 369
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
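# Added round-trip sketch (hedged; file names are placeholders):
# ds = Dataset.from_dict({"text": ["foo", "bar"]})
# ParquetDatasetWriter(ds, "data.parquet").write()
# ds_again = ParquetDatasetReader("data.parquet", cache_dir="./cache").read()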
| 117
| 0
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_deterministic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    hyps = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
    refs = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(hyps, refs, newline_sep=True) == calculate_rouge(hyps, refs, newline_sep=False)
def test_pegasus_newline() -> None:
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
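# Added note (illustrative): calculate_rouge takes parallel lists of hypothesis and
# reference strings and returns aggregated F-measures by default, e.g.
# scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeL"])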
| 44
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
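# Added example invocation (the script name and all paths are placeholders):
# python convert_umt5_checkpoint.py \
#     --t5x_checkpoint_path /path/to/t5x_checkpoint \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./pytorch_model \
#     --scalable_attention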
| 53
| 0
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
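# Added usage sketch (hedged; `model` is a placeholder denoising network): outside the
# tests, the scheduler drives a sampling loop roughly like:
# scheduler = IPNDMScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample).prev_sample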
| 370
|
from __future__ import annotations
def all_distinct(elements: list[int]) -> bool:
    """
    Return True when every element of the list is distinct.

    >>> all_distinct([1, 2, 3, 4])
    True
    >>> all_distinct([1, 2, 2, 4])
    False
    """
    return len(set(elements)) == len(elements)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 339
| 0
|
'''simple docstring'''

# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 55
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Cast every tensor of a saved state dict to fp16, overwriting src_path unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
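# Example invocation through the fire-generated CLI (file and path names are illustrative):
#     python fp16_convert.py --src_path pytorch_model.bin --save_path pytorch_model.fp16.bin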
| 236
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
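# The PT<->TF weight sharing exercised above, as a minimal standalone sketch
# (assumes both torch and tensorflow are installed; SMALL_MODEL_IDENTIFIER is the
# same tiny checkpoint the tests import, the temp path is illustrative):
#     pt_model = BertForMaskedLM.from_pretrained(SMALL_MODEL_IDENTIFIER)
#     pt_model.save_pretrained("/tmp/tiny-bert")
#     tf_model = TFBertForMaskedLM.from_pretrained("/tmp/tiny-bert", from_pt=True)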
| 359
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
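# Running this file directly generates a temporary Arrow dataset, times every
# access pattern above, and writes the timings as JSON to RESULTS_FILE_PATH
# (results/<script name>.json next to this script; the script name itself is
# whatever this file is saved as).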
| 42
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
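# With hidden_size = 384 (the dino_vits* configuration set below), the fused qkv
# weight popped above has shape (1152, 384); the three slices recover the 384-row
# query, key and value projection matrices.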
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # we verify the conversion on a COCO validation image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
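# Example invocation (script and output names are illustrative):
#     python convert_dino_to_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16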
| 92
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
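# The n-th term follows the closed form h(n) = n * (2n - 1), so
# hexagonal_numbers(length=5) returns [0, 1, 6, 15, 28].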
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 247
| 0
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 25
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ]
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."]
        )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
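    # Note: shift_tokens_right rotates each label row one step to the right and
    # wraps the last non-pad token (here the trailing EOS, id 2) around to
    # position 0, which is why decoder_input_ids starts with [2, RO_CODE] while
    # labels still start with RO_CODE.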
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 25
| 1
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 38
|
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
    def __call__(
        self, image: Union[torch.FloatTensor, PIL.Image.Image] = None, strength: float = 0.8,
        batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil", return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
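# Minimal usage sketch (illustrative; the checkpoint name and subfolder layout
# are assumptions, any unconditional UNet + compatible scheduler should work):
#
#     from diffusers import UNet2DModel, DDIMScheduler
#
#     unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
#     scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
#     pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#     images, noising_timestep = pipe(image=pil_image, strength=0.5, num_inference_steps=50, return_dict=False)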
| 117
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculate whichever of voltage, current or power is passed as 0,
    given the other two values.

    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 272
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """
    Helps in renaming the embedding layer weights.

    Args:
        idx: stage number in the original model
    """
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
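# e.g. embeddings(0)[0] pairs the HF key
#     "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight"
# with the original checkpoint key "stage0.patch_embed.proj.weight".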
def attention(idx, cnt):
    """
    Helps in renaming the attention block weights.

    Args:
        idx: stage number in the original model
        cnt: count of blocks in each stage
    """
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Collect the rename pair for the stage's classification token."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    """Collect the rename pairs for the final layernorm and classifier head."""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert a Microsoft CvT checkpoint to the Hugging Face format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
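
# --- Illustrative sketch (not part of the original script) -------------------
# The rename tables built above are lists of (hf_name, original_name) pairs;
# the conversion consumes them by copying each tensor out of the original
# checkpoint under its new Hugging Face key. A minimal standalone version:
def _demo_apply_rename_pairs(rename_pairs, original_weights):
    """Re-key `original_weights` (a dict of tensors) using the given pairs."""
    renamed = OrderedDict()
    for hf_name, original_name in rename_pairs:
        renamed[hf_name] = original_weights[original_name]
    return renamed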
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
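
# Each element returned by fill_mask is a (filled_sentence, probability, token)
# tuple, so the call above prints the three most likely completions ranked by
# model probability (the exact tokens depend on the downloaded checkpoint).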
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
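
# How the rolling include/exclude DP plays out on [1, 2, 3] (a sketch):
#   start:  max_including = 1, max_excluding = 0
#   num=2:  max_including = 0 + 2 = 2, max_excluding = max(1, 0) = 1
#   num=3:  max_including = 1 + 3 = 4, max_excluding = max(2, 1) = 2
#   answer: max(4, 2) = 4  (take 1 and 3, skipping the adjacent 2)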
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Creates a set of `DataLoader`s for one fold of the glue/mrpc dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
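
# A quick illustration (not part of the original example) of what
# `StratifiedKFold.split` yields: pairs of index arrays whose class ratios
# mirror the full label set. The toy labels below are hypothetical.
def _demo_stratified_split():
    kfold = StratifiedKFold(n_splits=3)
    toy_labels = np.array([0, 0, 0, 1, 1, 1])
    for train_idxs, valid_idxs in kfold.split(np.zeros(len(toy_labels)), toy_labels):
        print(train_idxs, valid_idxs)  # each validation fold holds one 0 and one 1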
def training_function(config, args):
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))

        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
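
# Sketch of the fold-ensembling step above, isolated for clarity (names are
# illustrative, not from the script): per-fold logits are stacked, averaged
# across folds, and argmax'd to produce the ensembled label predictions.
def _demo_ensemble_folds(fold_logits):
    # fold_logits: list with one (num_examples, num_classes) tensor per fold
    stacked = torch.stack(fold_logits, dim=0)  # (num_folds, N, C)
    averaged = stacked.sum(dim=0) / len(fold_logits)  # (N, C)
    return averaged.argmax(dim=-1)  # (N,) predicted labels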
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : bool = True , ) ->Optional[Any]:
snake_case_ = hidden_states
snake_case_ = []
snake_case_ = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
snake_case_ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
snake_case_ = self.transformer_index_for_condition[i]
snake_case_ = self.transformers[transformer_index](
_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , timestep=_UpperCamelCase , cross_attention_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
snake_case_ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
snake_case_ = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_UpperCamelCase )
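
# The blending above, written out (a note, not from the source): with
# mix_ratio = r and per-condition residuals e0, e1 (each computed as
# encoded_state - input_states), the module returns
#     r * e0 + (1 - r) * e1 + input_states,
# i.e. a convex combination of the two conditioned residuals added back onto
# the shared input.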
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
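
# Usage sketch (illustrative, not part of the module): thanks to
# `attribute_map`, the generic config names resolve to the XLNet-specific
# ones, e.g.
#
#     config = XLNetConfig()
#     config.hidden_size == config.d_model  # True, both read 1024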
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage:
    sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` char width."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug this function without running the actual benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
"""simple docstring"""
def solution(power: int = 1_000) -> int:
    """
    Sum the digits of 2**power.

    >>> solution(1000)
    1366
    >>> solution(15)
    26
    """
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
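
# Equivalent one-liner (a sketch, not in the original): the loop above is just
# a digit sum, so `sum(int(digit) for digit in str(2**power))` returns the same
# value.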
if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model quickly (without @slow) to sanity check that run_eval works
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    """Check whether the given tree is a binary search tree (strict ordering)."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
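
# Quick usage sketch (illustrative, not from the source):
#     root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#     is_binary_search_tree(root)  # True: 1 < 2 < 3 within strict bounds
#     is_binary_search_tree(TreeNode(2.0, TreeNode(2.0)))  # False: duplicates fail the strict '<'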
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches a prompt from a repo, or returns the prompt verbatim."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
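
# Dispatch rule illustrated (a note, not from the source): a string without
# whitespace like "huggingface-tools/default-prompts" is treated as a repo id
# and fetched from the Hub, while "Answer the question" contains a space and
# is returned verbatim as the prompt itself.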
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Decodes audio via the feature extractor when `audio` is passed, else forwards to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
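
    # Note on _decode_audio (added for clarity, not in the original): the
    # padding mask is right-padded with the *non*-padding value so that newly
    # generated samples beyond the original mask length are kept; only
    # positions equal to feature_extractor.padding_value are stripped before
    # each waveform is reshaped to (channels, -1).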
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
                f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}'
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        """Aggregate per-batch validation outputs into seqeval metrics."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        """Log validation metrics at the end of the epoch."""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        """Log test metrics at the end of the test run."""
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add NER-specific CLI arguments on top of the BaseTransformer ones."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
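# Example invocation sketch (hypothetical paths and model name; adjust to your setup):
#   python run_ner.py --data_dir ./conll2003 --labels ./conll2003/labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner-out \
#       --max_seq_length 128 --gpus 1 --do_train --do_predict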
| 66
|
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """A pseudorandom number generator built on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
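# Determinism sketch (illustrative seed value): every call applies
# seed = (multiplier * seed + increment) % modulo, so a fixed seed fixes the stream:
#   lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
#   assert lcg.next_number() == (1664525 * 42 + 1013904223) % (2 << 31)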
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 66
| 1
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
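# Worked example: for [1, 2, 4, 7, 3] the best non-adjacent picks are 2 and 7
# (sum 9), beating 1 + 4 + 3 = 8, so maximum_non_adjacent_sum([1, 2, 4, 7, 3]) == 9.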
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
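# Note: this is the standard transformers lazy-import pattern -- the dict above only
# names the submodules, and _LazyModule defers the actual (heavy) imports until an
# attribute such as LayoutLMv3Config is first accessed.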
| 349
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """Configuration class for the DETR object-detection model."""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False,
        position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True,
        dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1,
        dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
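# Hypothetical export sketch: these properties drive the ONNX exporter, e.g.
#   python -m transformers.onnx --model=facebook/detr-resnet-50 onnx/
# which traces at opset 12 (default_onnx_opset) and validates PyTorch vs. ONNX
# Runtime outputs within atol_for_validation (1e-5).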
| 14
|
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")
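# Round-trip sketch: Base85 is lossless, so base85_decode(base85_encode(s)) == s
# for any str s -- exercised on "Hello World!" below.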
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 14
| 1
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
__lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
__lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights, model):
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__lowerCamelCase : List[Any] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['attention']
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights, model):
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__lowerCamelCase : List[str] = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ), requires_grad=False )
__lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
__lowerCamelCase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__lowerCamelCase : int = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
__lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
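# Example conversion sketch (hypothetical paths; the script name follows the
# diffusers convention):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path ./base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion-pipe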
| 208
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method: one pass, no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
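# Agreement check (worked arithmetic): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0 both evaluators return 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0.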
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 208
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float tensor (as nested lists) of the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
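# Usage sketch: floats_list((2, 3)) returns a 2x3 nested list of floats in
# [0, scale); pass rng=random.Random(0) for a reproducible draw.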
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000,
        feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=16_000,
        return_attention_mask=True, do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
if equal_length:
UpperCamelCase_: Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase_: Any = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_: str = [np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def _a ( self ):
UpperCamelCase_: Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_: Dict = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase_: Union[str, Any] = [None, 1_6, None]
for max_length, padding in zip(__A , __A ):
UpperCamelCase_: Dict = feature_extractor(
__A , padding=__A , max_length=__A , return_attention_mask=__A )
UpperCamelCase_: List[Any] = inputs.input_features
UpperCamelCase_: Any = inputs.attention_mask
UpperCamelCase_: Optional[int] = [np.sum(__A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _a ( self ):
UpperCamelCase_: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_: Any = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase_: List[str] = [None, 1_6, None]
for max_length, padding in zip(__A , __A ):
UpperCamelCase_: List[str] = feature_extractor(
__A , max_length=__A , padding=__A , return_tensors='np' , return_attention_mask=__A )
UpperCamelCase_: int = inputs.input_features
UpperCamelCase_: Dict = inputs.attention_mask
UpperCamelCase_: Dict = [np.sum(__A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_: Tuple = feature_extractor(
__A , padding='max_length' , max_length=4 , truncation=__A , return_tensors='np' , return_attention_mask=__A , )
UpperCamelCase_: List[str] = inputs.input_features
UpperCamelCase_: List[str] = inputs.attention_mask
UpperCamelCase_: Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def _a ( self ):
UpperCamelCase_: str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_: List[str] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_: Optional[int] = feature_extractor(
__A , padding='longest' , max_length=4 , truncation=__A , return_tensors='np' , return_attention_mask=__A , )
UpperCamelCase_: int = inputs.input_features
UpperCamelCase_: int = inputs.attention_mask
UpperCamelCase_: Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 2_4) )
UpperCamelCase_: Any = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
UpperCamelCase_: str = feature_extractor(
__A , padding='longest' , max_length=1_6 , truncation=__A , return_tensors='np' , return_attention_mask=__A , )
UpperCamelCase_: Tuple = inputs.input_features
UpperCamelCase_: Union[str, Any] = inputs.attention_mask
UpperCamelCase_: Optional[int] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 2_4) )
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
-1.5_7_4_5, -1.7_7_1_3, -1.7_0_2_0, -1.6_0_6_9, -1.2_2_5_0, -1.1_1_0_5, -0.9_0_7_2, -0.8_2_4_1,
-1.2_3_1_0, -0.8_0_9_8, -0.3_3_2_0, -0.4_1_0_1, -0.7_9_8_5, -0.4_9_9_6, -0.8_2_1_3, -0.9_1_2_8,
-1.0_4_2_0, -1.1_2_8_6, -1.0_4_4_0, -0.7_9_9_9, -0.8_4_0_5, -1.2_2_7_5, -1.5_4_4_3, -1.4_6_2_5,
] )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 359
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
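# Both variants rely on the identity gcd(a, b) == gcd(b, a % b); worked example:
# gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6.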
def main() -> None:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 292
| 0
|
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = """sshleifer/student_marian_en_ro_6_1"""
MBART_TINY = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt(TestCasePlus):
def __magic_name__ ( self , _a=False , _a=None , _a=True , _a=True , _a=True , _a=True , ):
lowercase : Union[str, Any] = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_a , num_train_epochs=1 , distributed=_a , extra_args_str=_a , predict_with_generate=_a , do_train=_a , do_eval=_a , do_predict=_a , )
lowercase : Optional[Any] = TrainerState.load_from_json(os.path.join(_a , "trainer_state.json" ) ).log_history
if not do_eval:
return
lowercase : Dict = [log for log in logs if "eval_loss" in log.keys()]
lowercase : Union[str, Any] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
lowercase : Any = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , _a )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __magic_name__ ( self ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __magic_name__ ( self ):
self.run_seqaseq_quick(distributed=_a )
@require_torch_multi_gpu
def __magic_name__ ( self ):
self.run_seqaseq_quick(distributed=_a )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __magic_name__ ( self ):
self.run_seqaseq_quick(distributed=_a , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __magic_name__ ( self ):
self.run_seqaseq_quick(distributed=_a , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __magic_name__ ( self ):
self.run_seqaseq_quick(distributed=_a , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=_a )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def __magic_name__ ( self ):
self.run_seqaseq_quick(
distributed=_a , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=_a )
@require_apex
@require_torch_gpu
def __magic_name__ ( self ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=_a , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_a , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def __magic_name__ ( self , _a ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
lowercase : Optional[Any] = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
lowercase : Union[str, Any] = experiments[experiment_id]
lowercase : str = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
lowercase : int = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_a , extra_args_str=data["extra_args_str"] )
lowercase : Optional[int] = len(re.findall(_a , cl.err ) )
self.assertEqual(_a , data["n_matches"] )
@slow
def __magic_name__ ( self ):
lowercase : List[Any] = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=10 , distributed=_a , )
# Check metrics
lowercase : List[str] = TrainerState.load_from_json(os.path.join(_a , "trainer_state.json" ) ).log_history
lowercase : Union[str, Any] = [log for log in logs if "eval_loss" in log.keys()]
lowercase : Optional[int] = eval_metrics[0]
lowercase : Optional[int] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , _a )
# test if do_predict saves generations and metrics
lowercase : Optional[int] = os.listdir(_a )
lowercase : List[str] = {os.path.basename(_a ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __magic_name__ ( self ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_a ) -> Tuple[int, float]:
lowercase : int = "--skip_memory_metrics 0"
lowercase : Any = self.run_trainer(
max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=1 , optim=_a , distributed=_a , extra_args_str=_a , do_eval=_a , do_predict=_a , n_gpus_to_use=1 , )
# Check metrics
lowercase : Any = TrainerState.load_from_json(Path(_a , "trainer_state.json" ) ).log_history
lowercase : Dict = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
lowercase : Any = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
lowercase : str = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
lowercase , lowercase , lowercase : Any = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
lowercase , lowercase , lowercase : List[str] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
lowercase : Any = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
lowercase : List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
lowercase : Tuple = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
lowercase : str = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
lowercase : Optional[Any] = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_a , _a , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
_a , _a , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
_a , _a , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def __magic_name__ ( self , _a , _a , _a , _a = 3E-3 , _a = "adafactor" , _a = False , _a = None , _a = 0 , _a = True , _a = True , _a = True , _a = True , _a = None , ):
lowercase : List[Any] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
lowercase : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase : Any = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(_a )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(_a )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
lowercase : str = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(_a )}
""".split()
lowercase : Any = "\n --do_predict\n ".split()
lowercase : Tuple = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
lowercase : Optional[int] = get_gpu_count()
lowercase : Dict = get_torch_dist_unique_port()
lowercase : Optional[int] = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
lowercase : int = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_a , env=self.get_env() )
else:
lowercase : Optional[int] = ["run_translation.py"] + args
with patch.object(_a , "argv" , _a ):
main()
return output_dir
| 202
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a__ ( unittest.TestCase ):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __magic_name__ ( self ):
lowercase : Optional[int] = self.get_tokenizer()
lowercase : List[Any] = self.get_feature_extractor()
lowercase : Dict = ClapProcessor(tokenizer=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
lowercase : int = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
def __magic_name__ ( self ):
lowercase : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowercase : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase : Optional[int] = self.get_feature_extractor(do_normalize=_a , padding_value=1.0 )
lowercase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
def __magic_name__ ( self ):
lowercase : List[Any] = self.get_feature_extractor()
lowercase : List[str] = self.get_tokenizer()
lowercase : int = ClapProcessor(tokenizer=_a , feature_extractor=_a )
lowercase : Dict = floats_list((3, 1_000) )
lowercase : str = feature_extractor(_a , return_tensors="np" )
lowercase : Dict = processor(audios=_a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ ( self ):
lowercase : Dict = self.get_feature_extractor()
lowercase : int = self.get_tokenizer()
lowercase : Dict = ClapProcessor(tokenizer=_a , feature_extractor=_a )
lowercase : Optional[Any] = "This is a test string"
lowercase : Any = processor(text=_a )
lowercase : List[Any] = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self ):
lowercase : Optional[int] = self.get_feature_extractor()
lowercase : Any = self.get_tokenizer()
lowercase : Union[str, Any] = ClapProcessor(tokenizer=_a , feature_extractor=_a )
lowercase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : str = processor.batch_decode(_a )
lowercase : Optional[int] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : List[Any] = self.get_feature_extractor()
lowercase : Union[str, Any] = self.get_tokenizer()
lowercase : Any = ClapProcessor(tokenizer=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 202
| 1
|
'''simple docstring'''
from manim import *
class CheckpointLoadingScene(Scene):
    """Manim scene sketching how an empty model skeleton is loaded into CPU memory."""

    def construct(self) -> None:
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = VGroup(__a , __a ).arrange(__a , buff=0 )
snake_case = Text("""CPU""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
snake_case = [mem.copy() for i in range(1 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = Text("""GPU""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.align_to(__a , __a )
gpu.set_x(gpu.get_x() - 1 )
self.add(__a )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = Text("""Model""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.play(
Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , )
snake_case = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=2.5 ) , Write(__a ) , Write(__a ) )
self.add(__a )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__a ):
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
cpu_target.move_to(__a )
cpu_target.generate_target()
snake_case = 0.46 / 4
snake_case = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__a , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__a , buff=0.0 )
cpu_targs.append(__a )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__a ) )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
| 361
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for the MVP text-generation model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 3
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_, snake_case_ :Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=snake_case , from_pt=snake_case , dtype=jnp.bfloataa )
snake_case_ :Union[str, Any] = controlnet_params
snake_case_ :Union[str, Any] = """bird"""
snake_case_ :List[Any] = jax.device_count()
snake_case_ :List[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ :List[str] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ :Any = jax.random.PRNGKey(0 )
snake_case_ :List[str] = jax.random.split(snake_case , jax.device_count() )
snake_case_ :List[Any] = replicate(snake_case )
snake_case_ :List[str] = shard(snake_case )
snake_case_ :str = shard(snake_case )
snake_case_ :Dict = pipe(
prompt_ids=snake_case , image=snake_case , params=snake_case , prng_seed=snake_case , num_inference_steps=50 , jit=snake_case , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case_ :str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ :Union[str, Any] = images[0, 253:256, 253:256, -1]
snake_case_ :str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ :Dict = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
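# Added note (illustrative): both tests follow the standard JAX data-parallel recipe.
# `replicate(params)` copies the weights to every local device, `shard(x)` reshapes a batch
# of size num_devices * n into (num_devices, n, ...), and `jit=True` asks the pipeline to
# run as a pmapped function over that leading device axis. Standalone shape check:
#
#   import jax, numpy as np
#   from flax.training.common_utils import shard
#   batch = np.zeros((jax.device_count() * 2, 77), dtype=np.int32)  # hypothetical token ids
#   assert shard(batch).shape == (jax.device_count(), 2, 77)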
| 66
| 1
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 22
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
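# Added sketch: the WordpieceTokenizer exercised above uses greedy longest-match-first
# segmentation. A minimal re-implementation of that idea (illustrative, not the
# transformers source; max-chars-per-word handling omitted):


def greedy_wordpiece(word, vocab, unk_token="[UNK]"):
    """Split `word` into the longest vocab pieces, prefixing continuations with '##'."""
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk_token]  # no prefix matched: the whole word maps to UNK
        pieces.append(cur)
        start = end
    return pieces


# e.g. greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]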
| 22
| 1
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
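# Usage sketch (added; illustrative): wiring the two classes above together for export.
#
#   config = DetrConfig(num_queries=50, encoder_layers=2, decoder_layers=2)
#   onnx_config = DetrOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with dynamic axes for pixel_values / pixel_mask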
| 14
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
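# Usage sketch (added; illustrative): composing a seq2seq config from two standalone
# configs; `from_encoder_decoder_configs` flips the decoder flags before bundling.
#
#   from transformers import BertConfig
#   enc, dec = BertConfig(num_hidden_layers=2), BertConfig(num_hidden_layers=2)
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention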
| 14
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
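# Added note (illustrative): what align_predictions does on a toy batch. Positions whose
# gold label equals CrossEntropyLoss().ignore_index (-100) are dropped before scoring.
#
#   logits = np.zeros((1, 3, 2)); logits[0, :, 1] = 1.0  # every position predicted as class 1
#   label_ids = np.array([[0, 1, -100]])                 # last position is ignored padding
#   # with label_map = {0: "O", 1: "I-MISC"} (hypothetical) this yields
#   # preds_list == [["I-MISC", "I-MISC"]] and out_label_list == [["O", "I-MISC"]]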
| 246
|
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCamelCase_ = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCamelCase_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
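# Added check (illustrative): MCC for a 2x2 confusion matrix, computed by hand.
# With TP=2, TN=2, FP=1, FN=1:
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)) = (4 - 1) / sqrt(81) = 1/3
#
#   from sklearn.metrics import matthews_corrcoef
#   refs  = [1, 1, 1, 0, 0, 0]
#   preds = [1, 1, 0, 0, 0, 1]
#   assert abs(matthews_corrcoef(refs, preds) - 1 / 3) < 1e-9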
| 246
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
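# Added note: `calculate_bleu` is the local examples utility imported above; based on the
# examples utils (assumption, not shown here) it is a thin wrapper around sacrebleu's
# corpus_bleu that returns {"bleu": score}. Rough standalone equivalent:
#
#   from sacrebleu import corpus_bleu
#   corpus_bleu(["the cat sat"], [["the cat sat"]]).score  # -> 100.0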
| 68
|
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompresses data_bits using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # code width grows by one bit: prefix every existing code with "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of 0's and 1's as bytes, padded to a byte boundary."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strips the size prefix that was prepended at compression time."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
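# Added note: decompress_data mirrors the LZW encoder's dictionary growth. After emitting a
# known code `curr_string`, lexicon[curr_string] is overwritten with phrase+"0" and the next
# free index maps to phrase+"1"; when the index reaches a power of two, every existing code
# gains a leading "0" so the code width grows by one bit, exactly as on the encoder side.
# Tiny trace: decompress_data("1") emits "1" and leaves the lexicon as
# {"00": "0", "01": "10", "10": "11"}.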
| 292
| 0
|
'''simple docstring'''
from manim import *
class CheckpointOffloadAnimation(Scene):
    # Best-effort reconstruction of the obfuscated scene; variable names and direction
    # constants (UP/RIGHT/DOWN) follow the layout implied by the coordinates.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_6 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_6.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_6, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_6))

        step_7 = MarkupText(
            "Then, the checkpoint is removed from memory\nthrough garbage collection.",
            font_size=24,
        )
        step_7.move_to([2, 2, 0])

        self.play(Write(step_7, run_time=3))

        self.play(
            FadeOut(key, key_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
| 17
|
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 17
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout,
                    cross_attention_dim=cross_attention_dim, activation_fn=activation_fn,
                    attention_bias=attention_bias, double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None,
                num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
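# Added sketch: the frame/spatial reshuffle in forward() turns a (batch*frames, C, H, W)
# video batch into (batch*H*W, frames, C) so self-attention runs across *time* at every
# spatial location. Shape check with plain tensors (illustrative; norm omitted):


def _check_temporal_reshape(batch_size=2, num_frames=4, channel=8, height=3, width=5):
    x = torch.randn(batch_size * num_frames, channel, height, width)
    y = x[None, :].reshape(batch_size, num_frames, channel, height, width).permute(0, 2, 1, 3, 4)
    tokens = y.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
    assert tokens.shape == (batch_size * height * width, num_frames, channel)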
| 100
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(self, feature_size=80, sampling_rate=16000, num_mel_bins=80, padding_value=0.0,
                 do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: bool = True,
                       normalize_vars: bool = True, padding_value: float = 0.0) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
A : List[Any] = isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A : Tuple = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
A : Union[str, Any] = np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A : Any = [raw_speech]
# extract fbank features
A : List[str] = [self._extract_fbank_features(SCREAMING_SNAKE_CASE ) for waveform in raw_speech]
# convert into correct format for padding
A : str = BatchFeature({'''input_features''': features} )
A : Union[str, Any] = self.pad(
SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
# make sure list is in array format
A : List[str] = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , SCREAMING_SNAKE_CASE ):
A : str = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
A : Union[str, Any] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
A : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A : Dict = (
np.array(SCREAMING_SNAKE_CASE , dtype=np.intaa )
if self._get_padding_strategies(SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A : List[Any] = self.normalize(
padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE )
if return_tensors is not None:
A : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE )
return padded_inputs
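# A minimal usage sketch for the feature extractor above (added for clarity; it
# is not part of the original file, and the one-second 16 kHz waveform is an
# illustrative assumption).
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor(sampling_rate=16000, num_mel_bins=80)
    dummy_waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
    batch = extractor(dummy_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch["input_features"].shape)  # (1, num_frames, 80)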
| 3
| 0
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` so that the cumulative probability kept per latent pixel is at most
        `truncation_rate`; everything else is set to log(0), i.e. -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
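# A hedged usage sketch for the pipeline above (added for clarity; not part of
# the original file). "microsoft/vq-diffusion-ithq" is the published
# VQ-Diffusion checkpoint, but any compatible repo id would do.
if __name__ == "__main__":
    pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    output = pipeline("a painting of a teddy bear", num_inference_steps=50, truncation_rate=0.86)
    output.images[0].save("teddy_bear.png")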
| 358
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    Configuration class to store the configuration of a ResNet model.
    """

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
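# A small usage sketch (added for clarity; not part of the original file):
# instantiating the config with its defaults reproduces the ResNet-50 layout.
if __name__ == "__main__":
    config = ResNetConfig()  # bottleneck blocks with depths [3, 4, 6, 3] -> ResNet-50
    print(config.layer_type, config.hidden_sizes)
    onnx_config = ResNetOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', ...}}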
| 256
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 22
|
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 22
| 1
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
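# A quick sanity check of the checkpoint regex above (added for clarity; not
# part of the original script — the docstring fragment is made up):
# >>> _re_checkpoint.findall("See [bert-base-uncased](https://huggingface.co/bert-base-uncased).")
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]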
| 356
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
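# A hedged usage sketch (added for clarity; not part of the original file and
# assuming a diffusers-0.x-era JAX stack): create the functional scheduler
# state, set timesteps, and run one denoising step with dummy tensors. The
# (1, 3, 8, 8) shape is an illustrative assumption.
if __name__ == "__main__":
    scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    sample = jnp.zeros((1, 3, 8, 8), dtype=jnp.float32)
    model_output = jnp.zeros_like(sample)  # stand-in for a UNet noise prediction
    out = scheduler.step(state, model_output, timestep=int(state.timesteps[0]), sample=sample)
    print(out.prev_sample.shape)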
| 90
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
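# A minimal usage sketch (added for clarity; not part of the original file).
# "laion/clap-htsat-unfused" is a published CLAP checkpoint; the one-second
# 48 kHz dummy audio is an illustrative assumption.
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapProcessor

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    inputs = processor(
        text=["a dog barking"],
        audios=[np.zeros(48000, dtype=np.float32)],
        sampling_rate=48000,
        return_tensors="pt",
    )
    print(sorted(inputs.keys()))  # includes input_ids and input_features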
| 246
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 246
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
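# Illustrative note (added for clarity; not in the original script): with two
# expert files plus the shared weights, the loop above first writes
#   pytorch_model-00001-of-???.bin, ..., pytorch_model-00003-of-???.bin
# and the renaming pass then fills in the final totals, e.g.
#   pytorch_model-00001-of-00003.bin ... pytorch_model-00003-of-00003.bin,
# while the index file maps every parameter name to its shard.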
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 298
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 298
| 1
|
"""simple docstring"""
from manim import *
class CheckpointLoadingScene(Scene):  # class name is a descriptive stand-in; the original name was not recoverable
    def construct(self):  # `construct` is the entry point Manim requires for a Scene
__lowercase = Rectangle(height=0.5, width=0.5 )
__lowercase = Rectangle(height=0.25, width=0.25 )
__lowercase = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("CPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(4 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("GPU", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Model", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
rect.set_stroke(UpperCAmelCase__ )
__lowercase = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=UpperCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=UpperCAmelCase__, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=UpperCAmelCase__, buff=0.0 )
self.add(UpperCAmelCase__ )
model_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = [mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Loaded Checkpoint", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCAmelCase__ )
__lowercase = []
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = fill.copy().set_fill(UpperCAmelCase__, opacity=0.7 )
target.move_to(UpperCAmelCase__ )
ckpt_arr.append(UpperCAmelCase__ )
__lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCAmelCase__ )
self.add(*UpperCAmelCase__, *UpperCAmelCase__ )
__lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=1_8, )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""", font_size=1_8, )
blue_text.next_to(UpperCAmelCase__, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase__ )
__lowercase = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""", font_size=2_4, )
step_a.move_to([2, 2, 0] )
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = [meta_mem.copy() for i in range(6 )]
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = VGroup(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0 )
__lowercase = Text("Disk", font_size=2_4 )
__lowercase = Group(UpperCAmelCase__, UpperCAmelCase__ ).arrange(UpperCAmelCase__, buff=0.5, aligned_edge=UpperCAmelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ), Write(UpperCAmelCase__, run_time=1 ), Create(UpperCAmelCase__, run_time=1 ) )
__lowercase = []
for i, rect in enumerate(UpperCAmelCase__ ):
__lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCAmelCase__, run_time=1.5 ) )
self.play(*UpperCAmelCase__ )
self.play(FadeOut(UpperCAmelCase__ ) )
__lowercase = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""", font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase__, run_time=3 ) )
self.play(
FadeOut(UpperCAmelCase__, UpperCAmelCase__, *UpperCAmelCase__, *UpperCAmelCase__ ), )
self.wait()
| 17
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
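# A hedged usage sketch (added for clarity; not part of the original file).
# Tools are usually run through an agent, but a PipelineTool can also be called
# directly; the audio path below is a placeholder:
#
#     tool = SpeechToTextTool()
#     transcript = tool("path/to/audio.wav")
#     print(transcript)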
| 17
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak ProphetNet's weights into our ProphetNet structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, attribute) and len(attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 363
|
def check_cycle( graph: dict ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
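# The same back-edge detection, written iteratively with an explicit stack and
# white/gray/black coloring; a minimal sketch for deep graphs where the
# recursive version above would hit Python's recursion limit.
def has_cycle_iterative(graph: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on the stack / fully explored
    color = {node: WHITE for node in graph}
    for start in graph:
        if color[start] != WHITE:
            continue
        color[start] = GRAY
        stack = [(start, iter(graph[start]))]
        while stack:
            node, neighbors = stack[-1]
            advanced = False
            for nxt in neighbors:
                if color[nxt] == GRAY:  # back edge: a cycle exists
                    return True
                if color[nxt] == WHITE:
                    color[nxt] = GRAY
                    stack.append((nxt, iter(graph[nxt])))
                    advanced = True
                    break
            if not advanced:
                color[node] = BLACK
                stack.pop()
    return False

assert has_cycle_iterative({0: [1], 1: [2], 2: [0]})
assert not has_cycle_iterative({0: [1], 1: [2], 2: []})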
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = feature_size
A__ = sampling_rate
A__ = padding_value
A__ = kwargs.pop("padding_side" , "right" )
A__ = kwargs.pop("return_attention_mask" , lowercase )
super().__init__(**lowercase )
def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
A__ = processed_features[self.model_input_names[0]]
A__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase ) == 0:
if return_attention_mask:
A__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A__ = required_input[0]
if isinstance(lowercase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase ):
A__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase ):
A__ = "tf"
elif is_torch_tensor(lowercase ):
A__ = "pt"
elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
A__ = "np"
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowercase )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A__ = to_numpy(lowercase )
else:
A__ = [to_numpy(lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase )
A__ = processed_features[self.model_input_names[0]]
A__ = len(lowercase )
if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
A__ = []
for i in range(lowercase ):
A__ = {k: v[i] for k, v in processed_features.items()}
# truncation
A__ = self._truncate(
lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
truncated_inputs.append(lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A__ = PaddingStrategy.MAX_LENGTH
A__ = {}
for i in range(lowercase ):
# padding
A__ = self._pad(
truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
A__ = []
            if value.dtype is np.dtype(np.float64 ):
                A__ = value.astype(np.float32 )
batch_outputs[key].append(lowercase )
return BatchFeature(lowercase , tensor_type=lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
'''simple docstring'''
A__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A__ = len(lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A__ = np.ones(len(lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
A__ = max_length - len(lowercase )
if self.padding_side == "right":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (0, difference) )
A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
A__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = len(lowercase ) > max_length
if needs_to_be_truncated:
A__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A__ = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any:
'''simple docstring'''
if padding is not False:
if padding is True:
A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase , lowercase ):
A__ = PaddingStrategy(lowercase )
elif isinstance(lowercase , lowercase ):
A__ = padding
else:
A__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
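# The left/right padding branches of _pad above, in isolation: a minimal numpy
# sketch assuming feature_size == 1, so each example is a 1-D array and the
# attention mask marks real positions with 1 and padded positions with 0.
import numpy as np

seq = np.array([0.1, 0.2, 0.3])
difference, padding_value = 2, 0.0
padded_right = np.pad(seq, (0, difference), "constant", constant_values=padding_value)
padded_left = np.pad(seq, (difference, 0), "constant", constant_values=padding_value)
mask_right = np.pad(np.ones(len(seq), dtype=np.int32), (0, difference))
print(padded_right, mask_right)  # [0.1 0.2 0.3 0.  0. ] [1 1 1 0 0]
print(padded_left)               # [0.  0.  0.1 0.2 0.3]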
"""simple docstring"""
def pancake_sort( arr: list ) -> list:
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi, flipping the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the whole unsorted prefix, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
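# A quick randomized check of the flip-based sort above (restated here so the
# snippet is self-contained): pancake sort only ever reverses prefixes, yet the
# result must agree with Python's built-in sorted() on arbitrary input.
import random

def pancake_sort_check(arr: list) -> list:
    for cur in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:cur]))
        arr = arr[mi::-1] + arr[mi + 1 :]      # flip the maximum to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]   # flip it into its final position
    return arr

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert pancake_sort_check(list(data)) == sorted(data)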
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = fname.split(os.path.sep )[-1]
return re.search(r'''^(.*)_\d+\.jpg$''' ,UpperCamelCase_ ).groups()[0]
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=None , __snake_case=None ):
snake_case = file_names
snake_case = image_transform
snake_case = label_to_id
def __len__( self ):
return len(self.file_names )
def __getitem__( self , __snake_case ):
snake_case = self.file_names[idx]
snake_case = PIL.Image.open(__snake_case )
snake_case = raw_image.convert('''RGB''' )
if self.image_transform is not None:
snake_case = self.image_transform(__snake_case )
snake_case = extract_label(__snake_case )
if self.label_to_id is not None:
snake_case = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if args.with_tracking:
snake_case = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,log_with='''all''' ,project_dir=args.project_dir )
else:
snake_case = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case = config['''lr''']
snake_case = int(config['''num_epochs'''] )
snake_case = int(config['''seed'''] )
snake_case = int(config['''batch_size'''] )
snake_case = config['''image_size''']
if not isinstance(UpperCamelCase_ ,(list, tuple) ):
snake_case = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,'''isdigit''' ):
if args.checkpointing_steps == "epoch":
snake_case = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
snake_case = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
snake_case = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
snake_case = os.path.split(UpperCamelCase_ )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCamelCase_ ,UpperCamelCase_ )
# Grab all the image filenames
snake_case = [os.path.join(args.data_dir ,UpperCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
snake_case = [extract_label(UpperCamelCase_ ) for fname in file_names]
snake_case = list(set(UpperCamelCase_ ) )
id_to_label.sort()
snake_case = {lbl: i for i, lbl in enumerate(UpperCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# Split our filenames between train and validation
snake_case = np.random.permutation(len(UpperCamelCase_ ) )
snake_case = int(0.8 * len(UpperCamelCase_ ) )
snake_case = random_perm[:cut]
snake_case = random_perm[cut:]
# For training we use a simple RandomResizedCrop
snake_case = Compose([RandomResizedCrop(UpperCamelCase_ ,scale=(0.5, 1.0) ), ToTensor()] )
snake_case = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=UpperCamelCase_ ,label_to_id=UpperCamelCase_ )
# For evaluation, we use a deterministic Resize
snake_case = Compose([Resize(UpperCamelCase_ ), ToTensor()] )
snake_case = PetsDataset([file_names[i] for i in eval_split] ,image_transform=UpperCamelCase_ ,label_to_id=UpperCamelCase_ )
# Instantiate dataloaders.
snake_case = DataLoader(UpperCamelCase_ ,shuffle=UpperCamelCase_ ,batch_size=UpperCamelCase_ ,num_workers=4 )
snake_case = DataLoader(UpperCamelCase_ ,shuffle=UpperCamelCase_ ,batch_size=UpperCamelCase_ ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case = create_model('''resnet50d''' ,pretrained=UpperCamelCase_ ,num_classes=len(UpperCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
snake_case = False
for param in model.get_classifier().parameters():
snake_case = True
# We normalize the batches of images to be a bit faster.
snake_case = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
snake_case = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
snake_case = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
snake_case = OneCycleLR(optimizer=UpperCamelCase_ ,max_lr=UpperCamelCase_ ,epochs=UpperCamelCase_ ,steps_per_epoch=len(UpperCamelCase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case , snake_case , snake_case , snake_case , snake_case = accelerator.prepare(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
# We need to keep track of how many total steps we have iterated over
snake_case = 0
# We also need to keep track of the starting epoch so files are named properly
snake_case = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
snake_case = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
snake_case = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
snake_case = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
snake_case = os.path.splitext(UpperCamelCase_ )[0]
if "epoch" in training_difference:
snake_case = int(training_difference.replace('''epoch_''' ,'''''' ) ) + 1
snake_case = None
else:
snake_case = int(training_difference.replace('''step_''' ,'''''' ) )
snake_case = resume_step // len(UpperCamelCase_ )
resume_step -= starting_epoch * len(UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ,UpperCamelCase_ ):
model.train()
if args.with_tracking:
snake_case = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
snake_case = accelerator.skip_first_batches(UpperCamelCase_ ,UpperCamelCase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
snake_case = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case = (batch['''image'''] - mean) / std
snake_case = model(UpperCamelCase_ )
snake_case = torch.nn.functional.cross_entropy(UpperCamelCase_ ,batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
snake_case = os.path.join(args.output_dir ,UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
model.eval()
snake_case = 0
snake_case = 0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
snake_case = {k: v.to(accelerator.device ) for k, v in batch.items()}
snake_case = (batch['''image'''] - mean) / std
with torch.no_grad():
snake_case = model(UpperCamelCase_ )
snake_case = outputs.argmax(dim=-1 )
snake_case , snake_case = accelerator.gather_for_metrics((predictions, batch['''label''']) )
snake_case = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
snake_case = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_00 * eval_metric,
'''train_loss''': total_loss.item() / len(UpperCamelCase_ ),
'''epoch''': epoch,
} ,step=UpperCamelCase_ ,)
if checkpointing_steps == "epoch":
snake_case = F'''epoch_{epoch}'''
if args.output_dir is not None:
snake_case = os.path.join(args.output_dir ,UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
if args.with_tracking:
accelerator.end_training()
def UpperCAmelCase__ ():
"""simple docstring"""
snake_case = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' ,required=UpperCamelCase_ ,help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' ,action='''store_true''' ,help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' ,type=UpperCamelCase_ ,default=UpperCamelCase_ ,choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' ,)
parser.add_argument('''--cpu''' ,action='''store_true''' ,help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' ,type=UpperCamelCase_ ,default=UpperCamelCase_ ,help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' ,)
parser.add_argument(
'''--output_dir''' ,type=UpperCamelCase_ ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--resume_from_checkpoint''' ,type=UpperCamelCase_ ,default=UpperCamelCase_ ,help='''If the training should continue from a checkpoint folder.''' ,)
parser.add_argument(
'''--with_tracking''' ,action='''store_true''' ,help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' ,)
parser.add_argument(
        '''--project_dir''' ,type=UpperCamelCase_ ,default='''logs''' ,help='''Location where to store experiment tracking logs and relevant project information''' ,)
snake_case = parser.parse_args()
snake_case = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24}
training_function(UpperCamelCase_ ,UpperCamelCase_ )
if __name__ == "__main__":
main()
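# A worked example of the resume arithmetic above, with hypothetical numbers:
# given 250 batches per epoch and a checkpoint folder named "step_900", training
# restarts 150 batches into epoch 3 (epochs 0-2 are already complete).
steps_per_epoch = 250
resume_step = 900
starting_epoch = resume_step // steps_per_epoch   # 900 // 250 == 3
resume_step -= starting_epoch * steps_per_epoch   # 900 - 3 * 250 == 150
assert (starting_epoch, resume_step) == (3, 150)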
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'efficientnet'
def __init__( self , __snake_case = 3 , __snake_case = 6_0_0 , __snake_case = 2.0 , __snake_case = 3.1 , __snake_case = 8 , __snake_case = [3, 3, 5, 3, 5, 5, 3] , __snake_case = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , __snake_case = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , __snake_case = [] , __snake_case = [1, 2, 2, 2, 1, 2, 1] , __snake_case = [1, 2, 2, 3, 3, 4, 1] , __snake_case = [1, 6, 6, 6, 6, 6, 6] , __snake_case = 0.25 , __snake_case = "swish" , __snake_case = 2_5_6_0 , __snake_case = "mean" , __snake_case = 0.02 , __snake_case = 0.001 , __snake_case = 0.99 , __snake_case = 0.5 , __snake_case = 0.2 , **__snake_case , ):
super().__init__(**__snake_case )
snake_case = num_channels
snake_case = image_size
snake_case = width_coefficient
snake_case = depth_coefficient
snake_case = depth_divisor
snake_case = kernel_sizes
snake_case = in_channels
snake_case = out_channels
snake_case = depthwise_padding
snake_case = strides
snake_case = num_block_repeats
snake_case = expand_ratios
snake_case = squeeze_expansion_ratio
snake_case = hidden_act
snake_case = hidden_dim
snake_case = pooling_type
snake_case = initializer_range
snake_case = batch_norm_eps
snake_case = batch_norm_momentum
snake_case = dropout_rate
snake_case = drop_connect_rate
snake_case = sum(__snake_case ) * 4
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = version.parse('1.11' )
@property
def a_ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def a_ ( self ):
return 1E-5
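# How the width/depth coefficients stored in the config above are typically
# consumed: the standard EfficientNet rounding rule scales a channel count by
# the width coefficient and snaps it to a multiple of depth_divisor, never
# rounding down by more than 10%. A minimal sketch (the config itself only
# stores the coefficients; this helper is illustrative):
def round_filters(channels: int, width_coefficient: float = 2.0, depth_divisor: int = 8) -> int:
    channels *= width_coefficient
    new_channels = max(depth_divisor, int(channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * channels:  # avoid rounding down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)

print(round_filters(32))  # 64 with the b7-style width coefficient of 2.0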
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowercase_ ( self ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase = torch.arange(self.height * self.width )
__lowerCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase__ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase = self.shape
__lowerCamelCase = int(np.prod(lowerCamelCase__ ) )
__lowerCamelCase = self.get_image_coords()
__lowerCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase = self.get_camera_rays(lowerCamelCase__ )
__lowerCamelCase = rays.view(lowerCamelCase__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowercase_ ( self , lowerCamelCase__ ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase , __lowerCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase = coords.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = self.resolution()
__lowerCamelCase = self.fov()
__lowerCamelCase = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase = fracs * torch.tan(fov / 2 )
__lowerCamelCase = fracs.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = (
self.z.view(lowerCamelCase__ , 1 , 3 )
+ self.x.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase__ )
__lowerCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase__ , *lowerCamelCase__ , 2 , 3 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase__ , height=lowerCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCamelCase = np.array([np.sin(UpperCamelCase__ ), np.cos(UpperCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase = -z * 4
__lowerCamelCase = np.array([np.cos(UpperCamelCase__ ), -np.sin(UpperCamelCase__ ), 0.0] )
__lowerCamelCase = np.cross(UpperCamelCase__ , UpperCamelCase__ )
origins.append(UpperCamelCase__ )
xs.append(UpperCamelCase__ )
ys.append(UpperCamelCase__ )
zs.append(UpperCamelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCamelCase__ )) , )
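# A single-pixel version of the ray construction above (a sketch with assumed
# axis-aligned camera axes): pixel coordinates map to [-1, 1] fractions, are
# scaled by tan(fov / 2), and blend the camera's x/y axes into its z axis.
import numpy as np

width, height, fov = 64, 64, 0.7
origin = np.array([0.0, 0.0, -4.0])
x_axis = np.array([1.0, 0.0, 0.0])
y_axis = np.array([0.0, 1.0, 0.0])
z_axis = np.array([0.0, 0.0, 1.0])

px, py = 32, 32                                  # roughly the image center
fx = (px / (width - 1)) * 2 - 1                  # close to 0 for the center
fy = (py / (height - 1)) * 2 - 1
direction = z_axis + x_axis * fx * np.tan(fov / 2) + y_axis * fy * np.tan(fov / 2)
direction /= np.linalg.norm(direction)
print(origin, direction)                         # one (origin, direction) ray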
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ):
__A : Tuple = KandinskyVaaPipeline
__A : Any = [
"image_embeds",
"negative_image_embeds",
]
__A : Tuple = ["image_embeds", "negative_image_embeds"]
__A : Tuple = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__A : Union[str, Any] = False
@property
def __snake_case ( self : str ):
'''simple docstring'''
return 3_2
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return 3_2
@property
def __snake_case ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def __snake_case ( self : Any ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
return 1_0_0
@property
def __snake_case ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase :Optional[Any] = {
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase :int = UNetaDConditionModel(**snake_case__ )
return model
@property
def __snake_case ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase :Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self : List[Any] ):
'''simple docstring'''
lowercase :Optional[Any] = self.dummy_unet
lowercase :List[Any] = self.dummy_movq
lowercase :Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case__ , )
lowercase :str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __snake_case ( self : str , snake_case__ : Any , snake_case__ : str=0 ):
'''simple docstring'''
lowercase :Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
lowercase :Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
if str(snake_case__ ).startswith('''mps''' ):
lowercase :Optional[int] = torch.manual_seed(snake_case__ )
else:
lowercase :Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowercase :List[Any] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __snake_case ( self : List[str] ):
'''simple docstring'''
lowercase :List[Any] = '''cpu'''
lowercase :Tuple = self.get_dummy_components()
lowercase :Any = self.pipeline_class(**snake_case__ )
lowercase :List[str] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase :Optional[Any] = pipe(**self.get_dummy_inputs(snake_case__ ) )
lowercase :str = output.images
lowercase :Dict = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
lowercase :Any = image[0, -3:, -3:, -1]
lowercase :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase :List[Any] = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def __snake_case ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Any ):
'''simple docstring'''
lowercase :Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
lowercase :int = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
lowercase :Tuple = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowercase :str = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
lowercase :int = '''red cat, 4k photo'''
lowercase :str = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase , lowercase :Union[str, Any] = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase :Tuple = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowercase :List[Any] = pipeline(
image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_0_0 , output_type='''np''' , )
lowercase :Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
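# The corner-slice smoke test used above, factored into a helper (a sketch; in
# practice the expected values come from a known-good run of the pipeline):
import numpy as np

def assert_image_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    image_slice = image[0, -3:, -3:, -1].flatten()  # bottom-right 3x3 of the last channel
    max_diff = np.abs(image_slice - expected_slice).max()
    assert max_diff < atol, f"expected {expected_slice}, got {image_slice} (max diff {max_diff})"

dummy = np.zeros((1, 64, 64, 3), dtype=np.float32)  # hypothetical batch of images
assert_image_slice(dummy, np.zeros(9, dtype=np.float32))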
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __lowerCAmelCase ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__UpperCamelCase : Dict = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , snake_case__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __lowerCAmelCase ( ):
assert _test_patching.open is open
__UpperCamelCase : str = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , snake_case__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __lowerCAmelCase ( ):
# pandas.read_csv is not present in _test_patching
__UpperCamelCase : Union[str, Any] = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , snake_case__ ):
pass
def __lowerCAmelCase ( ):
    # builtins should always be mocked even if they're not in the globals
# in case they're loaded at one point
__UpperCamelCase : List[Any] = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , snake_case__ ) is None
with patch_submodule(_test_patching , "len" , snake_case__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __lowerCAmelCase ( ):
__UpperCamelCase : List[str] = "__test_patch_submodule_start_and_stop_mock__"
__UpperCamelCase : Any = patch_submodule(_test_patching , "open" , snake_case__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __lowerCAmelCase ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__UpperCamelCase : Union[str, Any] = "__test_patch_submodule_successive_join__"
__UpperCamelCase : Tuple = "__test_patch_submodule_successive_dirname__"
__UpperCamelCase : Tuple = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , snake_case__ ):
with patch_submodule(_test_patching , "os.rename" , snake_case__ ):
with patch_submodule(_test_patching , "os.path.dirname" , snake_case__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , snake_case__ ):
with patch_submodule(_test_patching , "os.path.join" , snake_case__ ):
with patch_submodule(_test_patching , "os.path.dirname" , snake_case__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __lowerCAmelCase ( ):
__UpperCamelCase : Any = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , snake_case__ ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , snake_case__ ):
pass
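# A minimal sketch of the patch/restore contract these tests exercise: swap one
# attribute for the duration of a `with` block, then restore it, even when the
# attribute did not exist beforehand. (patch_submodule itself also resolves
# dotted paths and module globals, which this sketch deliberately omits.)
import os
from contextlib import contextmanager

@contextmanager
def patch_attribute(obj, name, new_value):
    sentinel = object()
    old_value = getattr(obj, name, sentinel)
    setattr(obj, name, new_value)
    try:
        yield
    finally:
        if old_value is sentinel:
            delattr(obj, name)
        else:
            setattr(obj, name, old_value)

original_join = os.path.join
with patch_attribute(os.path, "join", lambda *parts: "|".join(parts)):
    assert os.path.join("a", "b") == "a|b"
assert os.path.join is original_join  # everything is back to normal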
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
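# The per-image query padding in __call__ above, in isolation: each image's list
# of text queries is padded with " " so the batch is rectangular before
# tokenization (a minimal sketch with hypothetical queries).
def pad_queries(nested_text: list) -> list:
    max_num_queries = max(len(queries) for queries in nested_text)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in nested_text]

print(pad_queries([["a cat", "a dog"], ["a remote"]]))
# [['a cat', 'a dog'], ['a remote', ' ']]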
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
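# A minimal sketch of the same lazy-import idea using PEP 562 module-level
# __getattr__ (illustrative only; transformers ships its own _LazyModule driven
# by an import structure like the one above). Here "dumps" resolves from the
# stdlib json module on first access instead of at import time.
import importlib

_LAZY_ATTRS = {"dumps": "json"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")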
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def snake_case_ (__A : Optional[int] , __A : Any ) -> Any:
__lowerCAmelCase : Union[str, Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def snake_case_ (__A : List[str] , __A : str ) -> Optional[Any]:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__lowerCAmelCase : Optional[Any] = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
__lowerCAmelCase : Tuple = in_proj_weight[
: encoder_config.hidden_size, :
]
__lowerCAmelCase : str = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__lowerCAmelCase : str = in_proj_weight[
-encoder_config.hidden_size :, :
]
def snake_case_ (__A : Union[str, Any] , __A : str , __A : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase : Any = dct.pop(__A )
__lowerCAmelCase : str = val
def snake_case_ (__A : int ) -> Tuple:
if "handwritten" in checkpoint_url:
__lowerCAmelCase : Tuple = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCAmelCase : Optional[Any] = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
__lowerCAmelCase : Dict = Image.open(requests.get(__A , stream=__A ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def snake_case_ (__A : Any , __A : Union[str, Any] ) -> Optional[int]:
__lowerCAmelCase : List[Any] = ViTConfig(image_size=3_8_4 , qkv_bias=__A )
__lowerCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__lowerCAmelCase : Union[str, Any] = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__lowerCAmelCase : Any = 1_0_2_4
__lowerCAmelCase : Any = 4_0_9_6
__lowerCAmelCase : Optional[int] = 2_4
__lowerCAmelCase : str = 1_6
__lowerCAmelCase : List[Any] = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Union[str, Any] = """relu"""
__lowerCAmelCase : List[Any] = 1_0_2_4
__lowerCAmelCase : Any = True
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Dict = False
# load HuggingFace model
__lowerCAmelCase : Dict = ViTModel(__A , add_pooling_layer=__A )
__lowerCAmelCase : Union[str, Any] = TrOCRForCausalLM(__A )
__lowerCAmelCase : Any = VisionEncoderDecoderModel(encoder=__A , decoder=__A )
model.eval()
# load state_dict of original model, rename some keys
__lowerCAmelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(__A , map_location="""cpu""" , check_hash=__A )["""model"""]
__lowerCAmelCase : Any = create_rename_keys(__A , __A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__lowerCAmelCase : Tuple = state_dict.pop(__A )
if key.startswith("""decoder""" ) and "output_projection" not in key:
__lowerCAmelCase : str = val
else:
__lowerCAmelCase : Tuple = val
# load state dict
model.load_state_dict(__A )
# Check outputs on an image
__lowerCAmelCase : List[Any] = ViTImageProcessor(size=encoder_config.image_size )
__lowerCAmelCase : List[str] = RobertaTokenizer.from_pretrained("""roberta-large""" )
__lowerCAmelCase : List[Any] = TrOCRProcessor(__A , __A )
__lowerCAmelCase : List[str] = processor(images=prepare_img(__A ) , return_tensors="""pt""" ).pixel_values
# verify logits
__lowerCAmelCase : List[str] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__lowerCAmelCase : List[str] = model(pixel_values=__A , decoder_input_ids=__A )
__lowerCAmelCase : Optional[Any] = outputs.logits
__lowerCAmelCase : Union[str, Any] = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__lowerCAmelCase : Dict = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
__lowerCAmelCase : List[Any] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
__lowerCAmelCase : Tuple = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
__lowerCAmelCase : List[Any] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , __A , atol=1e-3 ), "First elements of logits not as expected"
Path(__A ).mkdir(exist_ok=__A )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__A )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__UpperCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
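# The rename-then-load pattern above, in isolation: move each tensor to its new
# key with pop/insert, then hand the remapped dict to the fresh model (a sketch
# reusing two of the real key pairs).
def apply_renames(state_dict: dict, rename_keys: list) -> dict:
    for src, dest in rename_keys:
        state_dict[dest] = state_dict.pop(src)
    return state_dict

sd = {"encoder.deit.norm.weight": 1.0, "encoder.deit.norm.bias": 0.0}
pairs = [
    ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
    ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
]
print(apply_renames(sd, pairs))  # keys now follow the HF naming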
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__snake_case = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
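# A minimal usage sketch (illustrative, not part of the original module): instantiating
# the config with defaults builds a Swin backbone config, and `attribute_map` aliases work:
#   config = Mask2FormerConfig()
#   config.backbone_config.model_type  # "swin"
#   config.hidden_size                 # resolves to `hidden_dim` (256) via attribute_map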
| 320
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a .safetensors checkpoint into a diffusers pipeline."""
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet
        # find the target layer by walking attribute names, merging name parts on misses
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))
        # update weight: W <- W + alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
_lowerCamelCase : Tuple = parser.parse_args()
_lowerCamelCase : List[Any] = args.base_model_path
_lowerCamelCase : Optional[int] = args.checkpoint_path
_lowerCamelCase : Dict = args.dump_path
_lowerCamelCase : Optional[Any] = args.lora_prefix_unet
_lowerCamelCase : Optional[int] = args.lora_prefix_text_encoder
_lowerCamelCase : List[Any] = args.alpha
_lowerCamelCase : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
_lowerCamelCase : Tuple = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
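# The merge above is the low-rank update W = W0 + alpha * (up @ down). A tiny standalone
# sketch of that update (hypothetical shapes, not from the original script):
#   import torch
#   W0 = torch.randn(320, 320)
#   up, down = torch.randn(320, 4), torch.randn(4, 320)  # rank-4 LoRA factors
#   W = W0 + 0.75 * (up @ down)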
| 14
| 0
|
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),)
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,)
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 359
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
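# Illustrative extra check (not in the original file): an odd cycle is never bipartite.
# check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]})  # -> False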
| 46
| 0
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during multinomial sampling."""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor.")
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False,)
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp,)
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")),), scores_k,)
        scores = jax.vmap(handle_pairs)(input_ids, scores)
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False,)
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores,)
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)
        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k,)
        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
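# A minimal usage sketch (illustrative; shapes and values are made up): processors compose
# exactly the way generation applies them, each one mapping scores to new scores.
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)])
#   scores = processors(jnp.ones((1, 4), dtype=jnp.int32), jnp.zeros((1, 100)), 4)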
| 213
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False)
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
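# Example invocations (illustrative, flags may vary by version):
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs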
| 213
| 1
|
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, l, r, key) -> int:  # noqa: E741
    '''Binary search for the smallest index in v[l..r] whose value is >= key.'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v) -> int:
    '''O(n log n) LIS using the "tails of candidate subsequences" technique.'''
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
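# Worked example (illustrative): for [2, 5, 3, 7, 11, 8, 10, 13, 6] the longest strictly
# increasing subsequence is 2, 3, 7, 8, 10, 13, so the function returns 6.
# print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6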
| 370
|
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent) -> bool:
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink) -> list:
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
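# With the capacity matrix above, BFS-based augmenting paths saturate edges (1, 3),
# (4, 3) and (4, 5), so this prints [(1, 3), (4, 3), (4, 5)] (max flow 23).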
| 340
| 0
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level() -> int:
    '''If TRANSFORMERS_VERBOSITY is set to a valid choice, return that level; otherwise fall back to the default.'''
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}")
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('.')[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict() -> dict:
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info() -> None:
    return set_verbosity(INFO)
def set_verbosity_warning() -> None:
    return set_verbosity(WARNING)
def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG)
def set_verbosity_error() -> None:
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)
def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    '''Like logger.warning(), but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set.'''
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    '''Like logger.warning(), but emits each distinct message only once per process.'''
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    '''Dummy tqdm that does nothing, used when progress bars are disabled.'''
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
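# A minimal usage sketch (illustrative):
#   from transformers.utils import logging
#   logging.set_verbosity_info()
#   logger = logging.get_logger("transformers")
#   logger.info("configured")
#   logger.warning_advice("advisory warning, silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS=1")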
| 172
|
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_a : Any= "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 172
| 1
|
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
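# Example invocation (paths are placeholders; the script filename is an assumption):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus-large-arxiv/model.ckpt --save_dir ./bigbird-pegasus-large-arxiv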
| 358
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 325
| 0
|
'''simple docstring'''
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 139
|
'''simple docstring'''
from __future__ import annotations

import numpy as np


def relu(vector):
    # ReLU: element-wise maximum of 0 and the input
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
| 139
| 1
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
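# Hedged usage sketch (vertex labels are illustrative, not part of the original
# module): `add_edge` returns `self`, so calls can be chained, and `repr`
# pretty-prints the underlying dictionary.
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)
    print(graph)  # {1: [2], 2: [1, 3], 3: [2]}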
| 72
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that carry neither the "integration" nor the "unit" marker as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
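# Hedged usage note (not part of the original conftest): with the collection
# hook above, a test that declares no marker of its own is auto-marked `unit`,
# so `pytest -m unit` selects it while `pytest -m integration` runs only tests
# explicitly decorated with `@pytest.mark.integration`.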
| 72
| 1
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
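# Hedged usage note (not part of the original module): this reader backs the
# public `Dataset.from_text` entry point, e.g.
#
#     from datasets import Dataset
#     ds = Dataset.from_text("corpus.txt")  # hypothetical path; one row per line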
| 133
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
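# Hedged CLI sketch (hypothetical file names, not part of the original module):
#
#     transformers-cli run --task text-classification \
#         --input reviews.csv --column text --format csv \
#         --output predictions.csv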
| 46
| 0
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict with torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
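# Hedged CLI sketch (hypothetical paths and script name, not part of the
# original file):
#
#     python convert_t5x_checkpoint.py \
#         --t5x_checkpoint_path /tmp/t5x/checkpoint_1000000 \
#         --config_file /tmp/umt5-small/config.json \
#         --pytorch_dump_path /tmp/umt5-small \
#         --scalable_attention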
| 364
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
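# Hedged CLI sketch (hypothetical paths and script name, not part of the
# original file):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path ./dalle_encoder.pkl \
#         --pytorch_dump_folder_path ./flava_image_codebook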
| 32
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given
    electrical properties, then return the name/value pair of the zero value
    in a Python dict.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
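# Hedged usage sketch (illustrative values, not part of the original module):
# exactly one argument is passed as 0, and the function solves X_L = 2*pi*f*L
# for that quantity.
#
#     ind_reactance(inductance=35e-3, frequency=1e3, reactance=0)
#     # -> {'reactance': 219.911...}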
| 108
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 340
| 0
|
def solution():
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
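# Hedged cross-check (not part of the original solution): Python's calendar
# module (Monday == 0, Sunday == 6) counts the same 171 month-starting Sundays.
if __name__ == "__main__":
    import calendar

    assert solution() == sum(
        calendar.weekday(year, month, 1) == 6 for year in range(1901, 2001) for month in range(1, 13)
    )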
| 36
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
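# Hedged usage note (not part of the original file): with this guard,
# `from diffusers.pipelines.unclip import UnCLIPPipeline` resolves to the real
# pipeline when torch and transformers>=4.25.0 are installed, and to a dummy
# object that raises an informative ImportError otherwise.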
| 36
| 1
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Creates a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """
    This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
    """

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
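# Hedged usage sketch (hypothetical dataclass, not part of the original
# module): `parse_dict` maps a plain dictionary onto dataclass instances.
if __name__ == "__main__":
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = 1e-4
        use_fp16: bool = False

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_dict({"learning_rate": 3e-5, "use_fp16": True})
    print(example_args)  # ExampleArguments(learning_rate=3e-05, use_fp16=True)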
| 74
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
| 325
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
@classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """
        Instantiate an [`AltCLIPConfig`] (or a derived class) from an AltCLIP text model configuration and an AltCLIP
        vision model configuration.
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].
        """
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
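# Hedged usage sketch (not part of the original file): composing a full
# AltCLIPConfig from its two sub-configs via the classmethod above.
#
#     config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())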
| 185
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`."
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False."
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub."
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
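# Hypothetical smoke test of the launcher above: with --debug set, the composed
# gcloud command is printed instead of executed, so no TPU or gcloud install is
# needed. The TPU name and zone below are illustrative placeholders.
import sys

sys.argv = ["tpu-config", "--command", "echo hello", "--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--debug"]
main()  # prints something like: Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a ...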
| 185
| 1
|
"""simple docstring"""
from manim import *
class Stage5(Scene):
    # NOTE: the class name, step numbering, and highlight colors below are assumed;
    # only the scene structure is pinned down by the surrounding code.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_regs = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_regs, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(YELLOW, opacity=0.8)  # yellow = empty model (see key)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_regs = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_regs, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_regs)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18)
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18)
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1])
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7)
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
| 72
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
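# Hedged usage sketch for the config above: the keyword values simply mirror the
# defaults already declared in __init__, and to_dict() comes from PretrainedConfig.
config = RetriBertConfig(projection_dim=128, share_encoders=True)
assert config.model_type == "retribert"
assert config.to_dict()["projection_dim"] == 128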
| 72
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Align the backend pre-tokenizer's `add_prefix_space` with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation by concatenating turns, each followed by EOS."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
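# Hypothetical quick check of the fast tokenizer above; the checkpoint name comes
# from PRETRAINED_VOCAB_FILES_MAP and downloading it requires network access.
tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
assert tokenizer.decode(ids) == "Hello world"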
| 371
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
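# A small sketch of how the runtime check is typically used elsewhere in the
# codebase: `dep_version_check` looks up the pinned range from the deps table
# and raises if the installed version falls outside it.
dep_version_check("tqdm")  # e.g. enforces something like "tqdm>=4.27"
dep_version_check("numpy", hint="numpy is needed for array ops")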
| 65
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbor upsampling by 2x, followed by a 3x3 convolution.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        # Use a 1x1 convolution on the residual path when the channel count changes.
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the time embedding and broadcast it over the spatial dimensions.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
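# Hedged shape-check sketch for the resnet block above: initialize it with dummy
# NHWC inputs and a time embedding, then apply it. All shapes are illustrative.
block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
x = jnp.zeros((1, 8, 8, 32))   # NHWC layout, matching jax.image.resize usage above
temb = jnp.zeros((1, 128))     # time embedding, projected by nn.Dense in setup()
params = block.init(jax.random.PRNGKey(0), x, temb)
y = block.apply(params, x, temb)  # -> shape (1, 8, 8, 64)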
| 32
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
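# Hypothetical invocation of the conversion entry point above; the script file
# name and all paths are placeholders for files from the original mLUKE release.
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base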
| 354
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88
| 0
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
| 36
|
import argparse
import copy
def generate_neighbours(path):
    """Parse a file of weighted edges ("<node> <node> <distance>") into an adjacency dict."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting (and ending) at the file's first node."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All 2-swap neighbours of `solution`, each with its total distance appended, sorted by distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
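# Hedged usage sketch: the input file is assumed to list one weighted edge per
# line as "<node> <node> <distance>", with the tour starting at the file's first
# character, e.g.:
#
#   a b 20
#   a c 18
#   b c 10
#
# and the script would then be run as:
#
#   python tabu_search.py -f graph.txt -i 100 -s 5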
| 36
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 34
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
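# Hedged construction example for the pruning-aware config above; the keyword
# values mirror the defaults already declared in __init__.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
assert config.model_type == "masked_bert"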
| 34
| 1
|
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True iff num1 and num2 have opposite signs (XOR sign-bit trick)."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
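    # A few sanity checks of the XOR sign trick (sign bits differ iff the XOR is
    # negative), run alongside the doctests when the module is executed directly.
    assert different_signs(1, -1) is True
    assert different_signs(72, 1) is False
    assert different_signs(-2, -7) is False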
| 185
|
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 185
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 229
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=32 , _lowercase=3 , _lowercase=4 , _lowercase=[10, 20, 30, 40] , _lowercase=[2, 2, 3, 2] , _lowercase=True , _lowercase=True , _lowercase=37 , _lowercase="gelu" , _lowercase=10 , _lowercase=0.02 , _lowercase=["stage2", "stage3", "stage4"] , _lowercase=3 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_stages
_lowerCAmelCase = hidden_sizes
_lowerCAmelCase = depths
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = out_features
_lowerCAmelCase = num_labels
_lowerCAmelCase = scope
_lowerCAmelCase = num_stages
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def _lowercase ( self ):
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_lowercase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_lowercase , loss_ignore_index=255 , num_labels=self.num_labels , )
def _lowercase ( self , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = UperNetForSemanticSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowercase ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowercase : Dict = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_lowercase : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_lowercase : Dict = False
_lowercase : Optional[Any] = False
_lowercase : List[str] = False
_lowercase : Union[str, Any] = False
_lowercase : List[str] = False
_lowercase : List[Any] = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = UperNetModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ):
"""simple docstring"""
return
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict , config , model_class )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict , config , model_class )
def _lowercase ( self ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def _lowercase ( self ):
"""simple docstring"""
pass
@slow
def _lowercase ( self ):
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = UperNetForSemanticSegmentation.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img ():
filepath = hf_hub_download(
repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" )
image = Image.open(filepath ).convert("""RGB""" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" )
model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(torch_device )
image = prepare_img()
inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )
with torch.no_grad():
outputs = model(**inputs )
expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
def _lowercase ( self ):
"""simple docstring"""
processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" )
model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(torch_device )
image = prepare_img()
inputs = processor(images=image , return_tensors="""pt""" ).to(torch_device )
with torch.no_grad():
outputs = model(**inputs )
expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 ) )
| 229
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_data2vec_audio'''] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
_import_structure['''modeling_data2vec_text'''] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
_import_structure['''modeling_data2vec_vision'''] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
_import_structure['''modeling_tf_data2vec_vision'''] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
from .configuration_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecTextConfig,
Data2VecTextOnnxConfig,
)
from .configuration_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecVisionConfig,
Data2VecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_data2vec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
)
from .modeling_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .modeling_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecVisionForImageClassification,
Data2VecVisionForMaskedImageModeling,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_data2vec_vision import (
TFData2VecVisionForImageClassification,
TFData2VecVisionForSemanticSegmentation,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
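# With the wiring above, `import transformers.models.data2vec` stays cheap: _LazyModule
# records the names listed in `_import_structure` and only imports a submodule (for
# example `modeling_data2vec_text`) the first time one of its attributes is accessed.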
| 41
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def main():
'''simple docstring'''
parser = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data." )
parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use." )
parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix." )
args = parser.parse_args()
logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
UpperCAmelCase__ = BertTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
UpperCAmelCase__ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
UpperCAmelCase__ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ = tokenizer.special_tokens_map["cls_token"] # `<s>`
UpperCAmelCase__ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
UpperCAmelCase__ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
UpperCAmelCase__ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
UpperCAmelCase__ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f"""Loading text from {args.file_path}""" )
with open(args.file_path, "r", encoding="utf8" ) as fp:
data = fp.readlines()
logger.info("Start encoding" )
logger.info(f"""{len(data )} examples to process.""" )
rslt = []
iter = 0
interval = 10_000
start = time.time()
for text in data:
UpperCAmelCase__ = f"""{bos} {text.strip()} {sep}"""
UpperCAmelCase__ = tokenizer.encode(__A, add_special_tokens=__A )
rslt.append(__A )
iter += 1
if iter % interval == 0:
UpperCAmelCase__ = time.time()
logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
UpperCAmelCase__ = time.time()
logger.info("Finished binarization" )
logger.info(f"""{len(__A )} examples processed.""" )
UpperCAmelCase__ = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
UpperCAmelCase__ = tokenizer.vocab_size
if vocab_size < (1 << 16):
UpperCAmelCase__ = [np.uintaa(__A ) for d in rslt]
else:
UpperCAmelCase__ = [np.intaa(__A ) for d in rslt]
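# Rationale for the dtype switch above: token ids fit in np.uint16 whenever the
# vocabulary has fewer than 2**16 = 65,536 entries (true for the BERT- and GPT-2-sized
# vocabs targeted here), halving the pickle size compared to the np.int32 fallback.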
random.shuffle(rslt_ )
logger.info(f"""Dump to {dp_file}""" )
with open(__A, "wb" ) as handle:
pickle.dump(rslt_, __A, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 65
| 0
|
'''simple docstring'''
class A_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ) -> List[Any]:
UpperCAmelCase : str = None
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Tuple = graph
self._normalize_graph(lowercase_ , lowercase_ )
UpperCAmelCase : Any = len(lowercase_ )
UpperCAmelCase : Union[str, Any] = None
def _normalize_graph( self , sources , sinks ):
if isinstance(sources , int ):
sources = [sources]
if isinstance(sinks , int ):
sinks = [sinks]
if len(lowercase_ ) == 0 or len(lowercase_ ) == 0:
return
UpperCAmelCase : Tuple = sources[0]
UpperCAmelCase : int = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowercase_ ) > 1 or len(lowercase_ ) > 1:
UpperCAmelCase : Union[str, Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCAmelCase : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCAmelCase : Optional[int] = max_input_flow
UpperCAmelCase : str = 0
UpperCAmelCase : Dict = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCAmelCase : List[Any] = max_input_flow
UpperCAmelCase : int = size - 1
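# In other words: a fake super-source is prepended and wired to every real source with
# capacity max_input_flow (the total capacity leaving the sources), and a fake
# super-sink is appended and fed by every real sink the same way, reducing the
# multi-source/multi-sink problem to the single-source/single-sink case.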
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase_ ( self : Dict , lowercase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = algorithm(self )
class A_ :
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase_ : str ) -> str:
UpperCAmelCase : Optional[Any] = flow_network
UpperCAmelCase : int = flow_network.verticesCount
UpperCAmelCase : Optional[int] = flow_network.sourceIndex
UpperCAmelCase : Tuple = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
UpperCAmelCase : Optional[int] = flow_network.graph
UpperCAmelCase : Tuple = False
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
if not self.executed:
self._algorithm()
UpperCAmelCase : Optional[int] = True
def UpperCAmelCase_ ( self : Tuple ) -> str:
pass
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Tuple ) -> int:
super().__init__(lowercase_ )
# use this to save your result
UpperCAmelCase : int = -1
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Any ) -> Tuple:
super().__init__(lowercase_ )
UpperCAmelCase : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCAmelCase : Optional[int] = [0] * self.verticies_count
UpperCAmelCase : Optional[int] = [0] * self.verticies_count
def UpperCAmelCase_ ( self : str ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCAmelCase : Tuple = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCAmelCase : int = 0
while i < len(lowercase_ ):
UpperCAmelCase : List[Any] = vertices_list[i]
UpperCAmelCase : str = self.heights[vertex_index]
self.process_vertex(lowercase_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowercase_ ) )
UpperCAmelCase : Optional[Any] = 0
else:
i += 1
UpperCAmelCase : Union[str, Any] = sum(self.preflow[self.source_index] )
def UpperCAmelCase_ ( self : Any , lowercase_ : List[Any] ) -> int:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowercase_ , lowercase_ )
self.relabel(lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : int ) -> Union[str, Any]:
UpperCAmelCase : List[str] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase_ ( self : Dict , lowercase_ : Any ) -> str:
UpperCAmelCase : Union[str, Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCAmelCase : Tuple = self.heights[to_index]
if min_height is not None:
UpperCAmelCase : List[str] = min_height + 1
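# Push-relabel in brief: `push` moves as much excess as the residual capacity allows from
# a vertex to a strictly lower neighbour, while `relabel` raises a vertex's height to one
# above its lowest admissible neighbour so a push becomes possible again; the
# relabel-to-front ordering in the main loop bounds the number of these operations.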
if __name__ == "__main__":
lowercase__ = [0]
lowercase__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase__ = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
| 364
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A_ ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : VQModel , lowercase_ : UNet2DModel , lowercase_ : DDIMScheduler ) -> int:
super().__init__()
self.register_modules(vqvae=lowercase_ , unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : str , lowercase_ : int = 1 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : float = 0.0 , lowercase_ : int = 50 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , **lowercase_ : Optional[Any] , ) -> Union[Tuple, ImagePipelineOutput]:
UpperCAmelCase : str = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=lowercase_ , )
UpperCAmelCase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase : Optional[Any] = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(lowercase_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase : Tuple = {}
if accepts_eta:
UpperCAmelCase : List[str] = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase : Dict = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
UpperCAmelCase : Dict = self.unet(lowercase_ , lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase : Dict = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase : Any = self.vqvae.decode(lowercase_ ).sample
UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase : Tuple = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
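# A rough usage sketch, assuming trained `vqvae` and `unet` instances for this latent
# diffusion pipeline (instance names illustrative, not part of this file):
#
#   pipe = A_(vqvae=vqvae, unet=unet, scheduler=DDIMScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]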
| 280
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
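# As a rough usage sketch (the script filename below is illustrative):
#
#   accelerate launch tracking_example.py --with_tracking --mixed_precision fp16
#
# `--with_tracking` and `--mixed_precision` are the flags parsed in main() below.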
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator: Accelerator , batch_size: int = 1_6 ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
datasets = load_dataset("glue" , "mrpc" )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A__ = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_: Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A__ = 1_6
elif accelerator.mixed_precision != "no":
A__ = 8
else:
A__ = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
A__ = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders # noqa: F811
def training_function( config: Dict , args: Tuple ):
'''simple docstring'''
if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE_ ) == "1":
A__ = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
A__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
A__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config["lr"]
A__ = int(config["num_epochs"] )
A__ = int(config["seed"] )
A__ = int(config["batch_size"] )
set_seed(SCREAMING_SNAKE_CASE_ )
A__ , A__ = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
A__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ = batch_size // MAX_GPU_BATCH_SIZE
A__ = MAX_GPU_BATCH_SIZE
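# Worked example: with batch_size = 64 and MAX_GPU_BATCH_SIZE = 16 this yields 4
# accumulation steps of 16 samples each, so the effective batch size stays at 64 while
# each forward/backward pass fits in memory.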
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ = model.to(accelerator.device )
# Instantiate optimizer
A__ = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
A__ = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_0_0 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
A__ = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split("." )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
A__ = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ = model(**SCREAMING_SNAKE_CASE_ )
A__ = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
A__ = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**SCREAMING_SNAKE_CASE_ )
A__ = outputs.logits.argmax(dim=-1 )
A__ , A__ = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
A__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
"epoch": epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main():
'''simple docstring'''
A__ = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=SCREAMING_SNAKE_CASE_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
A__ = parser.parse_args()
A__ = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 68
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
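# As a rough usage sketch (the script filename below is illustrative):
#
#   accelerate launch cross_validation_example.py --num_folds 5
#
# `--num_folds` sets the number of StratifiedKFold splits parsed in main() below.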
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator, dataset, train_idxs, valid_idxs, batch_size = 16 ):
'''simple docstring'''
tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
datasets = DatasetDict(
{
"""train""": dataset["""train"""].select(train_idxs ),
"""validation""": dataset["""train"""].select(valid_idxs ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__magic_name__ = datasets.map(
A_, batched=A_, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ = tokenized_datasets.rename_column("""label""", """labels""" )
def collate_fn(A_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__magic_name__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__magic_name__ = 16
elif accelerator.mixed_precision != "no":
__magic_name__ = 8
else:
__magic_name__ = None
return tokenizer.pad(
A_, padding="""longest""", max_length=A_, pad_to_multiple_of=A_, return_tensors="""pt""", )
# Instantiate dataloaders.
__magic_name__ = DataLoader(
tokenized_datasets["""train"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
__magic_name__ = DataLoader(
tokenized_datasets["""validation"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
__magic_name__ = DataLoader(
tokenized_datasets["""test"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
return train_dataloader, eval_dataloader, test_dataloader
def training_function( config, args ):
'''simple docstring'''
__magic_name__ = []
# Download the dataset
__magic_name__ = load_dataset("""glue""", """mrpc""" )
# Create our splits
__magic_name__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
__magic_name__ = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ = config["""lr"""]
__magic_name__ = int(config["""num_epochs"""] )
__magic_name__ = int(config["""seed"""] )
__magic_name__ = int(config["""batch_size"""] )
__magic_name__ = evaluate.load("""glue""", """mrpc""" )
# If the batch size is too big we use gradient accumulation
__magic_name__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__magic_name__ = batch_size // MAX_GPU_BATCH_SIZE
__magic_name__ = MAX_GPU_BATCH_SIZE
set_seed(A_ )
# New Code #
# Create our folds:
__magic_name__ = kfold.split(np.zeros(datasets["""train"""].num_rows ), datasets["""train"""]["""label"""] )
__magic_name__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(A_ ):
__magic_name__ , __magic_name__ , __magic_name__ = get_fold_dataloaders(
A_, A_, A_, A_, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=A_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__magic_name__ = model.to(accelerator.device )
# Instantiate optimizer
__magic_name__ = AdamW(params=model.parameters(), lr=A_ )
# Instantiate scheduler
__magic_name__ = get_linear_schedule_with_warmup(
optimizer=A_, num_warmup_steps=100, num_training_steps=(len(A_ ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
A_, A_, A_, A_, A_ )
# Now we train the model
for epoch in range(A_ ):
model.train()
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__magic_name__ = model(**A_ )
__magic_name__ = outputs.loss
__magic_name__ = loss / gradient_accumulation_steps
accelerator.backward(A_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**A_ )
__magic_name__ = outputs.logits.argmax(dim=-1 )
__magic_name__ , __magic_name__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=A_, references=A_, )
__magic_name__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''', A_ )
# New Code #
# We also run predictions on the test set at the very end
__magic_name__ = []
for step, batch in enumerate(A_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ = model(**A_ )
__magic_name__ = outputs.logits
__magic_name__ , __magic_name__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(A_, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
__magic_name__ = torch.cat(A_, dim=0 )
__magic_name__ = torch.stack(A_, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
__magic_name__ = metric.compute(predictions=A_, references=A_ )
accelerator.print("""Average test metrics from all folds:""", A_ )
def main():
'''simple docstring'''
__magic_name__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""", type=A_, default=A_, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""", )
parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""", type=A_, default=3, help="""The number of splits to perform across the dataset""" )
__magic_name__ = parser.parse_args()
__magic_name__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(A_, A_ )
if __name__ == "__main__":
main()
| 88
| 0
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_a = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_a = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_a = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase( datasets.Metric ):
def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''')),
'''references''': datasets.Sequence(datasets.Value('''int32''')),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None):
'''simple docstring'''
score = f1_score(
references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight)
return {"f1": float(score) if score.size == 1 else score}
| 356
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase( PipelineTool ):
lowercase__ = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
lowercase__ = 'CIDAS/clipseg-rd64-refined'
lowercase__ = 'image_segmenter'
lowercase__ = CLIPSegForImageSegmentation
lowercase__ = ['image', 'text']
lowercase__ = ['image']
def __init__( self , *__a , **__a) -> Any:
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__a , **__a)
def encode( self , image , label):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''')
def forward( self , inputs):
'''simple docstring'''
with torch.no_grad():
logits = self.model(**inputs).logits
return logits
def decode( self , outputs):
'''simple docstring'''
array = outputs.cpu().detach().numpy()
array[array <= 0] = 0  # threshold the logits into a binary mask
array[array > 0] = 1
return Image.fromarray((array * 2_55).astype(np.uint8))
| 100
| 0
|
'''simple docstring'''
def snake_case_ (num : int ):
if isinstance(num , float ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(num , str ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
negative = False
if num < 0:
negative = True
num = -num
binary = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_a ) for e in binary )
return "0b" + "".join(str(_a ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=DummyObject ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=DummyObject ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=DummyObject ):
__a : Any = ["""flax""", """transformers"""]
def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
| 34
| 1
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 361
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch :
def __init__(self , text : str , pattern : str):
self.text , self.pattern = text, pattern
self.textLen , self.patLen = len(text), len(pattern)
def match_in_pattern(self , char : str):
# finds the rightmost occurrence of `char` in the pattern, or -1 if absent
for i in range(self.patLen - 1 , -1 , -1):
if char == self.pattern[i]:
return i
return -1
def mismatch_in_text(self , current_pos : int):
# finds the rightmost mismatching position for the window at current_pos, or -1 on a full match
for i in range(self.patLen - 1 , -1 , -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def bad_character_heuristic(self ):
# searches pattern in text and returns index positions
positions = []
for i in range(self.textLen - self.patLen + 1):
mismatch_index = self.mismatch_in_text(i)
if mismatch_index == -1:
positions.append(i)
else:
match_index = self.match_in_pattern(self.text[mismatch_index])
i = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
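# Note: only the bad-character rule of Boyer-Moore is implemented here; on a mismatch the
# window index is moved so that the mismatching text character lines up with its
# rightmost occurrence in the pattern (a shift of mismatch_index - match_index).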
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 57
| 0
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def UpperCamelCase_ ( dataset_size , input_in_memory_max_size , monkeypatch ):
'''simple docstring'''
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , snake_case_ )
__lowerCAmelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__lowerCAmelCase = dataset_size < in_memory_max_size
else:
__lowerCAmelCase = False
__lowerCAmelCase = is_small_dataset(snake_case_ )
assert result == expected
| 229
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def dummy_uncond_unet( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a ( self : Dict ) -> int:
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = PNDMScheduler()
__lowerCAmelCase = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type="""numpy""" ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type="""numpy""" , return_dict=SCREAMING_SNAKE_CASE__ )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : Any ) -> Any:
__lowerCAmelCase = """google/ddpm-cifar10-32"""
__lowerCAmelCase = UNet2DModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = PNDMScheduler()
__lowerCAmelCase = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , output_type="""numpy""" ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 229
| 1
|
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 329
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
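
# --- Worked sketch (illustrative addition): the cosine schedule above defines a cumulative
# signal level alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and derives per-step betas
# via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at `max_beta`.
def demo_betas_for_alpha_bar():
    betas = betas_for_alpha_bar(1000)
    assert betas.shape == (1000,)
    assert float(betas[0]) < float(betas[-1])  # betas grow toward the end of the schedule
    assert float(betas.max()) <= 0.999  # the max_beta cap holds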
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
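
# --- Worked example (illustrative addition): `add_noise` implements the closed-form forward
# process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, so its output can be
# checked against that formula directly.
def demo_add_noise():
    scheduler = UnCLIPScheduler()
    x0 = torch.ones(1, 3, 8, 8)
    eps = torch.randn(1, 3, 8, 8)
    noisy = scheduler.add_noise(x0, eps, torch.tensor([10]))
    alpha_bar = scheduler.alphas_cumprod[10]
    expected = alpha_bar**0.5 * x0 + (1 - alpha_bar) ** 0.5 * eps
    assert torch.allclose(noisy, expected, atol=1e-6)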
| 329
| 1
|
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
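
# --- Usage sketch (illustrative addition): both helpers implement double-and-add
# ("Russian peasant") multiplication; the second keeps every partial sum reduced modulo `modulus`.
if __name__ == "__main__":
    assert binary_multiply(2, 3) == 6
    assert binary_multiply(5, 0) == 0
    assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5
    print("binary multiplication checks passed")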
| 300
|
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number)
            iterations += 1
            if is_palindrome(a_number):
                break
        else:
            # while-else: only numbers that never reached a palindrome within 50
            # reverse-and-add iterations are counted as Lychrel numbers
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
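
# --- Worked example (illustrative addition): 47 resolves in one reverse-and-add step
# (47 + 74 = 121, a palindrome), while 196 — the classic Lychrel candidate — does not.
def demo_lychrel_steps():
    assert sum_reverse(47) == 121 and is_palindrome(121)
    assert sum_reverse(196) == 887 and not is_palindrome(887)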
| 280
| 0
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        y = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
| 363
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
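
# --- Derivation note (illustrative addition): an a x b grid contains T(a) * T(b) rectangles,
# where T(n) = n * (n + 1) / 2 is the n-th triangle number. Setting T(a) * T(b) = target and
# solving T(b) = target / T(a) with the quadratic formula gives
# b = (-1 + sqrt(1 + 8 * target / T(a))) / 2, the `b_estimate` computed in the loop above.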
| 173
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
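
# --- Usage sketch (illustrative addition): `make_batched` normalizes every accepted input
# shape to a list of videos, each video being a list of frames.
def demo_make_batched():
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame  # single image -> [[image]]
    assert make_batched([frame, frame])[0][1] is frame  # list of frames -> [video]
    assert make_batched([[frame, frame]])[0][0] is frame  # already batched -> unchanged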
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None,
        do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 10
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
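
# --- Shape sketch (illustrative addition): each fused `in_proj_weight` is a (3 * 256, 256)
# matrix with query, key and value stacked along dim 0, so the three 256-row slices taken
# above are exactly the separate q/k/v projection matrices the HuggingFace attention expects.
def demo_qkv_split():
    in_proj_weight = torch.randn(3 * 256, 256)
    q, k, v = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
    assert q.shape == k.shape == v.shape == (256, 256)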
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 100
| 0
|
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
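
# --- Property sketch (illustrative addition): XNOR is the negation of XOR, which the
# assertions above already cover exhaustively for binary inputs.
def demo_xnor_is_negated_xor():
    for a in (0, 1):
        for b in (0, 1):
            assert xnor_gate(a, b) == 1 - (a ^ b)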
| 364
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
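
# --- Usage sketch (illustrative addition): `attribute_map` lets the generic config names
# resolve to the DETR-style fields, so `hidden_size` and `d_model` stay in sync.
def demo_config_aliases():
    config = TableTransformerConfig(d_model=128, encoder_attention_heads=4)
    assert config.hidden_size == 128
    assert config.num_attention_heads == 4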
| 33
| 0
|
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
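
# --- Context sketch (illustrative addition): the loops above assume custom.js contains a
# block shaped roughly like the following hypothetical excerpt, located by line prefix:
#
#     const stableVersion = "v4.30.0"
#     const versionMapping = {
#         "": "main",
#         "v4.30.0": "v4.30.0",
#     }
#
# The script rewrites the first line and appends one new entry before the closing brace.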
| 98
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 0
|
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 367
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 18
| 0
|
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
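# Quick sanity check of the channel arithmetic used by the tester above (a
# hedged illustration, not part of the original test file): make_divisible
# rounds a channel count to a multiple of `divisor`, so with the default
# width_multiplier=0.25 the expected last hidden size is
# make_divisible(512 * 0.25, divisor=8) == 128, which is already a multiple
# of 8 and therefore unchanged by the rounding.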
| 329
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 329
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]


if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
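# The pattern above defers heavy imports until an attribute is first accessed.
# A minimal sketch of the idea (simplified; the real _LazyModule also handles
# direct submodule imports and keeps the original module __spec__):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called when `attr` was not found by normal lookup
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)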
| 351
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
    from transformers.models.auto.modeling_tf_auto import (
        TF_MODEL_FOR_CAUSAL_LM_MAPPING,
        TF_MODEL_FOR_MASKED_LM_MAPPING,
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        TF_MODEL_MAPPING,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
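# Note on the caching assertions above: once a checkpoint is cached locally, a
# fresh from_pretrained() should issue exactly one HEAD request (to compare the
# remote etag against the cached revision) and no GET requests at all; the
# three RequestCounter attributes check precisely that breakdown.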
| 95
| 0
|
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d

    count = sum(1 for x in frequency[1:limit] if x == 10)

    return count


if __name__ == "__main__":
    print(f"{solution() = }")
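# Why the checks above work (derivation, for reference): write the arithmetic
# progression as x = a + d, y = a, z = a - d. Then
#     n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4*d - a),
# so a (first_term) must divide n, and first_term + n / first_term equals 4*d,
# which is why the code requires divisibility by 4 before recovering d. The
# positivity constraints z > 0 and n > 0 become a > d and a < 4*d, matching
# the two inequalities in the inner loop.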
| 275
|
"""simple docstring"""
def find_minimum_change(denominations, value):
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
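# Worked example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
# greedily takes 500, then 100 four times, then 50, 20, 10, 5 and 2, returning
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Note the greedy strategy is only
# guaranteed optimal for canonical coin systems such as this one.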
| 173
| 0
|
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
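# Example: excel_title_to_column("AB") == 1 * 26 + 2 == 28 and
# excel_title_to_column("ZZ") == 26 * 26 + 26 == 702, since each letter is a
# base-26 digit with A=1 ... Z=26.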
| 104
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 104
| 1
|
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
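# Example: generate_pascal_triangle(4) returns
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]; the optimized variant exploits row
# symmetry by computing only the first ceil(n/2) entries of each row and
# mirroring them, roughly halving the additions per row.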
| 269
|
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    # O(n^2): scan the remainder of the list for each element
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    # same O(n^2) idea, written with slicing and enumerate
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    # O(n) monotonic-stack solution, scanning from the right
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 33
| 0
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa
if os.name == "nt":

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hidden_cursor():
    # restores the cursor even if the wrapped block raises
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
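# Typical use (a hedged example; `hidden_cursor` is the name chosen above for
# the de-obfuscated context manager, and the menu-rendering call is purely
# hypothetical):
#
#     with hidden_cursor():
#         render_interactive_menu()
#
# Because show_cursor() runs in the `finally` clause, the terminal cursor is
# restored even if the body raises.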
| 364
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 205
| 0
|
"""simple docstring"""
def binary_count_setbits(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
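# Example: binary_count_setbits(25) == 3, since bin(25) == "0b11001" contains
# three "1" digits.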
| 16
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18
| 0
|
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 361
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 333
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
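# A hedged usage sketch (assumes an `audio` waveform is available, e.g. a 1-D
# float numpy array sampled at 16 kHz; the explicit setup() call is optional
# since PipelineTool loads the processor and model lazily on first use):
#
#     tool = SpeechToTextTool()
#     tool.setup()           # loads WhisperProcessor + model weights
#     text = tool(audio)     # runs encode -> forward -> decode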
| 95
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # difference between the predicted and the actual output for one example
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
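# The hypothesis being fit above is linear: h(x) = p0 + p1*x1 + p2*x2 + p3*x3,
# with parameter_vector = [p0, p1, p2, p3]. Each update moves every parameter
# against the gradient of the mean squared-error cost,
#     p_i <- p_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_i(j),
# where x_0(j) = 1 for the bias term; that is exactly what
# get_cost_derivative(i - 1) computes (index -1 selects the bias column).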
| 95
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[int] = '''vivit'''
def __init__( self : Dict , __a : Dict=224 , __a : List[Any]=32 , __a : List[Any]=[2, 16, 16] , __a : Dict=3 , __a : Optional[Any]=768 , __a : int=12 , __a : Dict=12 , __a : List[Any]=3072 , __a : str="gelu_fast" , __a : List[Any]=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.02 , __a : List[Any]=1E-06 , __a : Union[str, Any]=True , **__a : Dict , ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[int] = hidden_size
__lowercase : Optional[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Optional[Any] = intermediate_size
__lowercase : Optional[int] = hidden_act
__lowercase : Optional[int] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : int = image_size
__lowercase : Tuple = num_frames
__lowercase : List[Any] = tubelet_size
__lowercase : Dict = num_channels
__lowercase : str = qkv_bias
super().__init__(**__a )
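
# Usage sketch for the config above (assumes this is the standard transformers
# VivitConfig and that VivitModel is available; not part of the original file):
from transformers import VivitConfig, VivitModel

config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
model = VivitModel(config)  # randomly initialized ViViT video encoder
print(config.model_type, config.hidden_size)  # "vivit" 768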
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
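
# Usage sketch mirroring what the tests above exercise (assumes network access
# to download the timm "resnet18" checkpoint; not part of the original test file):
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:  # one feature map per requested stage
    print(feature_map.shape)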