| code (string, lengths 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, lengths 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : Dict = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ['PoolFormerFeatureExtractor']
a : int = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 147 |
import collections
import importlib.util
import os
import re
from pathlib import Path
_SCREAMING_SNAKE_CASE = 'src/transformers'
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_SCREAMING_SNAKE_CASE = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_SCREAMING_SNAKE_CASE = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_SCREAMING_SNAKE_CASE = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_SCREAMING_SNAKE_CASE = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*try:')
# Catches a line with else:
_SCREAMING_SNAKE_CASE = re.compile(R'^\s*else:')
def snake_case ( snake_case__ :Optional[Any]) -> List[str]:
if _re_test_backend.search(snake_case__) is None:
return None
_A = [b[0] for b in _re_backend.findall(snake_case__)]
backends.sort()
return "_and_".join(snake_case__)
def snake_case ( snake_case__ :Any) -> Any:
with open(snake_case__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
_A = f.readlines()
_A = 0
while line_index < len(snake_case__) and not lines[line_index].startswith("""_import_structure = {"""):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(snake_case__):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith("""if TYPE_CHECKING""") and find_backend(lines[line_index]) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(snake_case__):
_A = _re_one_line_import_struct.search(snake_case__).groups()[0]
_A = re.findall(R"""\[([^\]]+)\]""" , snake_case__)
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """)])
line_index += 1
continue
_A = _re_import_struct_key_value.search(snake_case__)
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """) if len(snake_case__) > 0]
objects.extend(snake_case__)
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING"""):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 4):
_A = lines[line_index]
if _re_import_struct_add_one.search(snake_case__) is not None:
objects.append(_re_import_struct_add_one.search(snake_case__).groups()[0])
elif _re_import_struct_add_many.search(snake_case__) is not None:
_A = _re_import_struct_add_many.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_between_brackets.search(snake_case__) is not None:
_A = _re_between_brackets.search(snake_case__).groups()[0].split(""", """)
_A = [obj[1:-1] for obj in imports if len(snake_case__) > 0]
objects.extend(snake_case__)
elif _re_quote_object.search(snake_case__) is not None:
objects.append(_re_quote_object.search(snake_case__).groups()[0])
elif line.startswith(""" """ * 8 + """\""""):
objects.append(line[9:-3])
elif line.startswith(""" """ * 12 + """\""""):
objects.append(line[13:-3])
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(snake_case__)
and find_backend(lines[line_index]) is None
and not lines[line_index].startswith("""else""")
):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
_A = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(snake_case__):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index])
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1]) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index]) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index]) <= 1 or lines[line_index].startswith(""" """ * 8):
_A = lines[line_index]
_A = _re_import.search(snake_case__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 12):
objects.append(line[12:-2])
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def snake_case ( snake_case__ :Dict , snake_case__ :int) -> List[Any]:
def find_duplicates(snake_case__ :Union[str, Any]):
return [k for k, v in collections.Counter(snake_case__).items() if v > 1]
if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key])
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''')
_A = find_duplicates(type_hint_objects[key])
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')
if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
_A = """base imports""" if key == """none""" else F'''{key} backend'''
errors.append(F'''Differences for {name}:''')
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''')
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''')
return errors
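# Hypothetical mini-example (editorial addition) of what the checker above reports:
# given import_dict = {"none": ["FooConfig", "FooModel"]} and
# type_hints = {"none": ["FooConfig"]}, the returned errors contain
# "  FooModel in _import_structure but not in TYPE_HINT."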
def snake_case ( ) -> int:
_A = []
for root, _, files in os.walk(snake_case__):
if "__init__.py" in files:
_A = os.path.join(snake_case__ , """__init__.py""")
_A = parse_init(snake_case__)
if objects is not None:
_A = analyze_results(*snake_case__)
if len(snake_case__) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append("""\n""".join(snake_case__))
if len(snake_case__) > 0:
raise ValueError("""\n\n""".join(snake_case__))
def snake_case ( ) -> Optional[Any]:
_A = []
for path, directories, files in os.walk(snake_case__):
for folder in directories:
# Ignore private modules
if folder.startswith("""_"""):
directories.remove(snake_case__)
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(snake_case__) / folder).glob("""*.py"""))) == 0:
continue
_A = str((Path(snake_case__) / folder).relative_to(snake_case__))
_A = short_path.replace(os.path.sep , """.""")
submodules.append(snake_case__)
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(snake_case__) / fname).relative_to(snake_case__))
_A = short_path.replace(""".py""" , """""").replace(os.path.sep , """.""")
if len(submodule.split(""".""")) == 1:
submodules.append(snake_case__)
return submodules
_SCREAMING_SNAKE_CASE = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def snake_case ( ) -> Union[str, Any]:
# This is to make sure the transformers module imported is the one in the repo.
_A = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(snake_case__ , """__init__.py""") , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_A = spec.loader.load_module()
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(snake_case__) > 0:
_A = """\n""".join(F'''- {module}''' for module in module_not_registered)
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
F'''{list_of_modules}\n'''
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 180 | 0 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
a : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__ )
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , **snake_case__ ):
'''simple docstring'''
super().__init__(**snake_case__ )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(snake_case__ )
def UpperCAmelCase_ ( self , **snake_case__ ):
'''simple docstring'''
lowercase__ : Any= {}
lowercase__ : List[Any]= {}
lowercase__ : Dict= {}
# preprocess args
if "points_per_batch" in kwargs:
lowercase__ : Optional[Any]= kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
lowercase__ : Any= kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
lowercase__ : Dict= kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
lowercase__ : Any= kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
lowercase__ : Optional[int]= kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
lowercase__ : List[str]= kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
lowercase__ : Any= kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
lowercase__ : Tuple= kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
lowercase__ : str= kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
lowercase__ : Tuple= kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
lowercase__ : List[Any]= kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
lowercase__ : Dict= kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , snake_case__ , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ):
'''simple docstring'''
return super().__call__(snake_case__ , *snake_case__ , num_workers=snake_case__ , batch_size=snake_case__ , **snake_case__ )
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=64 , snake_case__ = 0 , snake_case__ = 512 / 1500 , snake_case__ = 32 , snake_case__ = 1 , ):
'''simple docstring'''
lowercase__ : Optional[int]= load_image(snake_case__ )
lowercase__ : Optional[int]= self.image_processor.size["longest_edge"]
lowercase__, lowercase__, lowercase__, lowercase__ : List[Any]= self.image_processor.generate_crop_boxes(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase__ : Optional[Any]= self.image_processor(images=snake_case__ , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
lowercase__ : str= self.get_inference_context()
with inference_context():
lowercase__ : int= self._ensure_tensor_on_device(snake_case__ , device=self.device )
lowercase__ : int= self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
lowercase__ : Dict= image_embeddings
lowercase__ : List[Any]= grid_points.shape[1]
lowercase__ : Optional[int]= points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , snake_case__ , snake_case__ ):
lowercase__ : Union[str, Any]= grid_points[:, i : i + points_per_batch, :, :]
lowercase__ : List[Any]= input_labels[:, i : i + points_per_batch]
lowercase__ : List[Any]= i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=0.88 , snake_case__=0.95 , snake_case__=0 , snake_case__=1 , ):
'''simple docstring'''
lowercase__ : Union[str, Any]= model_inputs.pop("input_boxes" )
lowercase__ : Any= model_inputs.pop("is_last" )
lowercase__ : Tuple= model_inputs.pop("original_sizes" ).tolist()
lowercase__ : Optional[int]= model_inputs.pop("reshaped_input_sizes" ).tolist()
lowercase__ : List[str]= self.model(**snake_case__ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowercase__ : Tuple= model_outputs["pred_masks"]
lowercase__ : Tuple= self.image_processor.post_process_masks(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , binarize=snake_case__ )
lowercase__ : Dict= model_outputs["iou_scores"]
lowercase__, lowercase__, lowercase__ : Any= self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCAmelCase_ ( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=0.7 , ):
'''simple docstring'''
lowercase__ : Any= []
lowercase__ : Tuple= []
lowercase__ : List[Any]= []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
lowercase__ : Dict= torch.cat(snake_case__ )
lowercase__ : List[Any]= torch.cat(snake_case__ )
lowercase__, lowercase__, lowercase__, lowercase__ : Dict= self.image_processor.post_process_for_mask_generation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase__ : str= defaultdict(snake_case__ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(snake_case__ )
lowercase__ : Tuple= {}
if output_rle_mask:
lowercase__ : Optional[int]= rle_mask
if output_bboxes_mask:
lowercase__ : Optional[Any]= bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 150 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Union[str, Any] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCamelCase = "src/diffusers"
__lowerCamelCase = "."
# This is to make sure the diffusers module imported is the one in the repo.
__lowerCamelCase = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
__lowerCamelCase = spec.loader.load_module()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return line.startswith(UpperCamelCase__ ) or len(UpperCamelCase__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , UpperCamelCase__ ) is not None
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = object_name.split('.' )
A__ = 0
# First let's find the module where our object lives.
A__ = parts[i]
while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , F'''{module}.py''' ) ):
i += 1
if i < len(UpperCamelCase__ ):
A__ = os.path.join(UpperCamelCase__ , parts[i] )
if i >= len(UpperCamelCase__ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCamelCase__ , F'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
A__ = f.readlines()
# Now let's find the class / func in the code!
A__ = ''
A__ = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCamelCase__ ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCamelCase__ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A__ = line_index
while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ = lines[start_index:line_index]
return "".join(UpperCamelCase__ )
__lowerCamelCase = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
__lowerCamelCase = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
__lowerCamelCase = re.compile(R"<FILL\s+[^>]*>")
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = code.split('\n' )
A__ = 0
while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase__ ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
A__ = len(get_indent(UpperCamelCase__ ) ) > 0
if has_indent:
A__ = F'''class Bla:\n{code}'''
A__ = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
A__ = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
A__ , A__ = style_docstrings_in_code(UpperCamelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
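# Editorial note: black only formats syntactically complete source, so the helper above
# wraps an indented snippet in a dummy `class Bla:` header before formatting and strips
# the header again afterwards, preserving the snippet's original indentation level.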
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
with open(UpperCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A__ = f.readlines()
A__ = []
A__ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCamelCase__ ):
A__ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A__ , A__ , A__ = search.groups()
A__ = find_code_in_diffusers(UpperCamelCase__ )
A__ = get_indent(UpperCamelCase__ )
A__ = line_index + 1 if indent == theoretical_indent else line_index + 2
A__ = theoretical_indent
A__ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A__ = True
while line_index < len(UpperCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(UpperCamelCase__ ):
break
A__ = lines[line_index]
A__ = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(F'''^{indent}# End copy''' , UpperCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A__ = lines[start_index:line_index]
A__ = ''.join(UpperCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A__ = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(UpperCamelCase__ ) is None]
A__ = '\n'.join(UpperCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCamelCase__ ) > 0:
A__ = replace_pattern.replace('with' , '' ).split(',' )
A__ = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A__ , A__ , A__ = pattern.groups()
A__ = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if option.strip() == "all-casing":
A__ = re.sub(obja.lower() , obja.lower() , UpperCamelCase__ )
A__ = re.sub(obja.upper() , obja.upper() , UpperCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A__ = blackify(lines[start_index - 1] + theoretical_code )
A__ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A__ = lines[:start_index] + [theoretical_code] + lines[line_index:]
A__ = start_index + 1
if overwrite and len(UpperCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCamelCase__ )
return diffs
def UpperCAmelCase ( UpperCamelCase__ = False ):
"""simple docstring"""
A__ = glob.glob(os.path.join(UpperCamelCase__ , '**/*.py' ) , recursive=UpperCamelCase__ )
A__ = []
for filename in all_files:
A__ = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(UpperCamelCase__ ) > 0:
A__ = '\n'.join(UpperCamelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowerCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 221 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(UpperCamelCase__ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
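# Editorial note: the helper above normalizes every accepted input to a list of videos,
# each a list of frames: a single image becomes [[image]], a flat list of frames
# becomes [frames], and a nested list of frames is returned unchanged.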
class UpperCamelCase__( __A ):
lowerCAmelCase__ : List[Any] = ['pixel_values']
def __init__( self ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = 1 / 2_55 ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'shortest_edge': 2_56}
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = offset
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
if "shortest_edge" in size:
A__ = get_resize_output_image_size(__UpperCAmelCase ,size['shortest_edge'] ,default_to_square=__UpperCAmelCase )
elif "height" in size and "width" in size:
A__ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase ,size=(size['height'], size['width']) ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> Optional[Any]:
A__ = image.astype(np.float32 )
if offset:
A__ = image - (scale / 2)
return rescale(__UpperCAmelCase ,scale=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
return normalize(__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A__ = to_numpy_array(__UpperCAmelCase )
if do_resize:
A__ = self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase )
if do_center_crop:
A__ = self.center_crop(__UpperCAmelCase ,size=__UpperCAmelCase )
if do_rescale:
A__ = self.rescale(image=__UpperCAmelCase ,scale=__UpperCAmelCase ,offset=__UpperCAmelCase )
if do_normalize:
A__ = self.normalize(image=__UpperCAmelCase ,mean=__UpperCAmelCase ,std=__UpperCAmelCase )
A__ = to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase )
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = offset if offset is not None else self.offset
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase ,default_to_square=__UpperCAmelCase )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__UpperCAmelCase ,param_name='crop_size' )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A__ = make_batched(__UpperCAmelCase )
A__ = [
[
self._preprocess_image(
image=__UpperCAmelCase ,do_resize=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ,do_center_crop=__UpperCAmelCase ,crop_size=__UpperCAmelCase ,do_rescale=__UpperCAmelCase ,rescale_factor=__UpperCAmelCase ,offset=__UpperCAmelCase ,do_normalize=__UpperCAmelCase ,image_mean=__UpperCAmelCase ,image_std=__UpperCAmelCase ,data_format=__UpperCAmelCase ,)
for img in video
]
for video in videos
]
A__ = {'pixel_values': videos}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_a : Optional[int] = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : Optional[Any] =[
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCAmelCase = deprecated_arg[3:]
__lowerCAmelCase = not kwargs.pop(__SCREAMING_SNAKE_CASE )
logger.warning(
f'{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'
f' {positive_arg}={kwargs[positive_arg]}' )
__lowerCAmelCase = kwargs.pop("""tpu_name""",self.tpu_name )
__lowerCAmelCase = kwargs.pop("""device_idx""",self.device_idx )
__lowerCAmelCase = kwargs.pop("""eager_mode""",self.eager_mode )
__lowerCAmelCase = kwargs.pop("""use_xla""",self.use_xla )
super().__init__(**__SCREAMING_SNAKE_CASE )
a : str =field(
default=lowerCAmelCase_ , metadata={"""help""": """Name of TPU"""} , )
a : int =field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
a : bool =field(default=lowerCAmelCase_ , metadata={"""help""": """Benchmark models in eager model."""} )
a : bool =field(
default=lowerCAmelCase_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
__lowerCAmelCase = None
if self.tpu:
try:
if self.tpu_name:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__lowerCAmelCase = None
return tpu
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx],"""GPU""" )
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' )
else:
tf.config.set_visible_devices([],"""GPU""" ) # disable GPU
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' )
return strategy
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
return self._setup_tpu is not None
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
return self._setup_strategy
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
requires_backends(self,["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.n_gpu > 0
| 46 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        # Emit a warning on every call before delegating to the wrapped function.
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
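# Illustrative usage (editorial addition; `new_api` is an invented name):
#
#     @experimental
#     def new_api():
#         ...
#
#     new_api()  # first warns that 'new_api' is experimental, then runs it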
| 46 | 1 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n as a list of primes, with multiplicity."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever remains after trial division is itself prime.
        factors.append(n)
    return factors
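# Worked examples (editorial addition, not from the original docstring):
# prime_factors(24) -> [2, 2, 2, 3]; prime_factors(97) -> [97] (97 is prime).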
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
UpperCAmelCase__ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCAmelCase__ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCAmelCase__ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCAmelCase_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]="binary" , __lowerCAmelCase : Any=None , __lowerCAmelCase : int="warn" , ):
_UpperCAmelCase = recall_score(
__lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase , zero_division=__lowerCAmelCase , )
return {"recall": float(__lowerCAmelCase ) if score.size == 1 else score}
| 289 | 1 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest way to cover all remaining travel days from `index` onward.
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        # Either buy a 1-day, 7-day or 30-day pass starting today.
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
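# Worked example (editorial addition, not from the original docstring): with
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] for 1-day, 7-day and 30-day
# passes, mincost_tickets(days, costs) returns 11: a 1-day pass on day 1, a 7-day
# pass covering days 4-10, and a 1-day pass on day 20 (2 + 7 + 2 = 11).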
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        # Leaf level reached: return the score stored at this node.
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )

def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
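# Editorial note: with the scores above and height = 3, the driver prints
# "Optimal value : 65". The leaf pairs maximize to [90, 33, 65, 34423], the next
# level minimizes to [33, 65], and the root (a maximizing turn) picks 65.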
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 128 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase : str = logging.get_logger(__name__)
class __lowerCAmelCase ( snake_case_ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 316 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a :Any = {
'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
'processing_mgp_str': ['MgpstrProcessor'],
'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = [
'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
'MgpstrModel',
'MgpstrPreTrainedModel',
'MgpstrForSceneTextRecognition',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 312 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = """wavlm"""
def __init__( self , lowerCAmelCase=32 , lowerCAmelCase=7_68 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=30_72 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-5 , lowerCAmelCase="group" , lowerCAmelCase="gelu" , lowerCAmelCase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase=False , lowerCAmelCase=1_28 , lowerCAmelCase=16 , lowerCAmelCase=3_20 , lowerCAmelCase=8_00 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=0.05 , lowerCAmelCase=10 , lowerCAmelCase=2 , lowerCAmelCase=0.0 , lowerCAmelCase=10 , lowerCAmelCase=3_20 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , lowerCAmelCase=1_00 , lowerCAmelCase=2_56 , lowerCAmelCase=2_56 , lowerCAmelCase=0.1 , lowerCAmelCase="mean" , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2_56 , lowerCAmelCase=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase=(5, 3, 3, 1, 1) , lowerCAmelCase=(1, 2, 3, 1, 1) , lowerCAmelCase=5_12 , lowerCAmelCase=80 , lowerCAmelCase=0 , lowerCAmelCase=1 , lowerCAmelCase=2 , lowerCAmelCase=False , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=None , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase )
snake_case = hidden_size
snake_case = feat_extract_norm
snake_case = feat_extract_activation
snake_case = list(lowerCAmelCase )
snake_case = list(lowerCAmelCase )
snake_case = list(lowerCAmelCase )
snake_case = conv_bias
snake_case = num_buckets
snake_case = max_bucket_distance
snake_case = num_conv_pos_embeddings
snake_case = num_conv_pos_embedding_groups
snake_case = len(self.conv_dim )
snake_case = num_hidden_layers
snake_case = intermediate_size
snake_case = hidden_act
snake_case = num_attention_heads
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = feat_proj_dropout
snake_case = final_dropout
snake_case = layerdrop
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = num_ctc_classes
snake_case = vocab_size
snake_case = do_stable_layer_norm
snake_case = use_weighted_layer_sum
snake_case = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case = apply_spec_augment
snake_case = mask_time_prob
snake_case = mask_time_length
snake_case = mask_time_min_masks
snake_case = mask_feature_prob
snake_case = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case = num_codevectors_per_group
snake_case = num_codevector_groups
snake_case = contrastive_logits_temperature
snake_case = num_negatives
snake_case = codevector_dim
snake_case = proj_codevector_dim
snake_case = diversity_loss_weight
# ctc loss
snake_case = ctc_loss_reduction
snake_case = ctc_zero_infinity
# adapter
snake_case = add_adapter
snake_case = adapter_kernel_size
snake_case = adapter_stride
snake_case = num_adapter_layers
snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case = list(lowerCAmelCase )
snake_case = list(lowerCAmelCase )
snake_case = list(lowerCAmelCase )
snake_case = xvector_output_dim
@property
def snake_case ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
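    # Editorial note: the property above multiplies the conv strides together; with the
    # default conv_stride of (5, 2, 2, 2, 2, 2, 2) it evaluates to 5 * 2**6 = 320, the
    # overall downsampling factor of the feature encoder.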
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
SCREAMING_SNAKE_CASE__ = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
SCREAMING_SNAKE_CASE__ = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
from .configuration_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecTextConfig,
Data2VecTextOnnxConfig,
)
from .configuration_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
Data2VecVisionConfig,
Data2VecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_data2vec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecAudioForAudioFrameClassification,
Data2VecAudioForCTC,
Data2VecAudioForSequenceClassification,
Data2VecAudioForXVector,
Data2VecAudioModel,
Data2VecAudioPreTrainedModel,
)
from .modeling_data2vec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecTextForCausalLM,
Data2VecTextForMaskedLM,
Data2VecTextForMultipleChoice,
Data2VecTextForQuestionAnswering,
Data2VecTextForSequenceClassification,
Data2VecTextForTokenClassification,
Data2VecTextModel,
Data2VecTextPreTrainedModel,
)
from .modeling_data2vec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
Data2VecVisionForImageClassification,
Data2VecVisionForMaskedImageModeling,
Data2VecVisionForSemanticSegmentation,
Data2VecVisionModel,
Data2VecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_data2vec_vision import (
TFData2VecVisionForImageClassification,
TFData2VecVisionForSemanticSegmentation,
TFData2VecVisionModel,
TFData2VecVisionPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 |
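The two rows above end with the same lazy-import idiom: a dict mapping submodule names to exported symbols is handed to a module-level proxy, so nothing heavy is imported until an attribute is actually touched. A minimal, self-contained sketch of that idea (a hypothetical `LazyModule`, not the real transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Module that imports its submodules only on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # reverse map: object name -> submodule that provides it
        self._object_to_module = {obj: mod for mod, objs in import_structure.items() for obj in objs}

    def __getattr__(self, attr):
        module_name = self._object_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + module_name, self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the import only happens once
        return value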
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(F'{solution() = }')
| 309 | 1 |
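The closed forms above are easy to sanity-check against a brute-force sum; a quick self-contained cross-check in plain Python:

def solution_brute_force(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares


# For the first ten naturals: 55**2 - 385 = 3025 - 385 = 2640.
assert solution_brute_force(10) == 2640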
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 361 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
UpperCamelCase_ : str = ","
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer"
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[Union[int, List[int]]] = None
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Optional[Union[str, List[str]]] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = "."
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = '"'
UpperCamelCase_ : int = 0
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : int = 0
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : int = 1_00_00
UpperCamelCase_ : Optional[datasets.Features] = None
UpperCamelCase_ : Optional[str] = "strict"
UpperCamelCase_ : Literal["error", "warn", "skip"] = "error"
UpperCamelCase_ : Optional[str] = None
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
if self.delimiter is not None:
_UpperCAmelCase : Any = self.delimiter
if self.column_names is not None:
_UpperCAmelCase : List[Any] = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 17 | 0 |
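Downstream, this builder is what `datasets.load_dataset("csv", ...)` dispatches to, and every `CsvConfig` field above is accepted as a keyword argument. A minimal usage sketch (the file path is hypothetical):

from datasets import load_dataset

# "data.csv" is a hypothetical local file; sep/skiprows flow into CsvConfig
# and from there into pandas.read_csv via pd_read_csv_kwargs.
dataset = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1)
print(dataset["train"][0])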
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
args, unknown = parser.parse_known_args()
if args.host != "localhost":
    if args.instance != "V100:1" or args.provider != "cheapest":
        raise ValueError("Cannot specify both BYO and on-demand cluster args")
    cluster = rh.cluster(
        name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
    )
else:
    cluster = rh.cluster(
        name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
    )
example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 226 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A =logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 226 | 1 |
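The color quantization above is plain nearest-neighbour assignment of RGB pixels to a palette. A small self-contained numpy sketch of the same math, on made-up data:

import numpy as np

# 4 pixels against a 2-colour palette; shapes follow squared_euclidean_distance(a, b): a is (n, 3), b is (k, 3)
pixels = np.array([[0, 0, 0], [250, 250, 250], [10, 5, 0], [200, 220, 255]], dtype=np.float64)
palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float64)

bt = palette.T
a2 = np.sum(np.square(pixels), axis=1)
b2 = np.sum(np.square(bt), axis=0)
d = a2[:, None] - 2 * np.matmul(pixels, bt) + b2[None, :]  # pairwise squared distances
print(np.argmin(d, axis=1))  # -> [0 1 0 1]: each pixel snaps to its nearest palette colour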
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_2d import DualTransformer2DModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .unet_1d import UNet1DModel
from .unet_2d import UNet2DModel
from .unet_2d_condition import UNet2DConditionModel
from .unet_3d_condition import UNet3DConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 198 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original checkpoint's weights to our MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 198 | 1 |
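Both q/k/v read-in functions above do the same thing: slice a fused in-projection matrix into three equal blocks for query, key and value. A toy torch sketch of that split, on random weights (illustrative only):

import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v]
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b, k_b, v_b = in_proj_bias[:hidden_size], in_proj_bias[hidden_size : 2 * hidden_size], in_proj_bias[-hidden_size:]

# The three blocks tile the fused matrix exactly.
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), in_proj_bias)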
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for p, i in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features, result, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None) -> Protein:
    return Protein(
        aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index,
    )
| 13 |
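Because PDB is a fixed-column format, the f-string in to_pdb pads every field to an exact width. A tiny standalone sketch of one ATOM record, using made-up coordinates:

# Standalone sketch of the fixed-width ATOM record built in to_pdb above.
record_type, atom_index, name, alt_loc = "ATOM", 1, " CA ", ""
res_name, chain_tag, res_index, insertion_code = "GLY", "A", 1, ""
x, y, z, occupancy, b_factor, element, charge = 11.104, 6.134, -6.504, 1.00, 0.00, "C", ""

atom_line = (
    f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
    f"{res_name:>3} {chain_tag:>1}"
    f"{res_index:>4}{insertion_code:>1}   "
    f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"
    f"{occupancy:>6.2f}{b_factor:>6.2f}          "
    f"{element:>2}{charge:>2}"
)
print(repr(atom_line))  # every column lands at the offset PDB readers expect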
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 308 | 0 |
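The launcher's trick is generic: import the target script as a module and rewrite sys.argv before handing control to its entry point. A minimal standard-library sketch of the same argv-patching technique (the script name and flags are hypothetical):

import runpy
import sys

# Pretend "train.py" was invoked as `python train.py --lr 3e-4`.
sys.argv = ["train.py", "--lr", "3e-4"]
runpy.run_path("train.py", run_name="__main__")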
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = '''Hello world! cécé herlolip'''
BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
        help='''Path to the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCamelCase_ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 358 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use when tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
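#
# A quick way to see how the cartesian product of variations expands (a
# hypothetical standalone snippet that mirrors the expansion logic used in
# main() below; it is not part of this tool):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   print([" ".join(v).strip() for v in itertools.product(*dims)])
#   # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#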
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped for `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # flip `if 0` to `if 1` to debug this function w/o needing to run the whole benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times,
    output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\033[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main() | 34 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """
    Recursive function to calculate the product of two matrices using the Strassen
    algorithm. Only supports square matrices whose dimensions are powers of 2.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension1[1] == dimension2[0]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
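
# Why this works: Strassen trades the 8 recursive block multiplications of the
# naive divide-and-conquer scheme for 7 (t1..t7 above) plus extra additions,
# giving O(n^log2(7)) ≈ O(n^2.81) instead of O(n^3). The zero-padding in
# `strassen` lifts arbitrary shapes to the next power of two so the halving
# recursion in `actual_strassen` always applies.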
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 255 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 142 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _lowercase ( self ) -> Dict:
_snake_case = MBartTokenizer(_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer.tokenize("This is a test" )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
_snake_case = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_snake_case = tokenizer.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
def _lowercase ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_snake_case = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
_snake_case = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
_snake_case = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=True
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE ,legacy_format=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it save with the same files
self.assertSequenceEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
# Save tokenizer rust, legacy_format=False
_snake_case = tempfile.mkdtemp()
_snake_case = tokenizer_r.save_pretrained(_SCREAMING_SNAKE_CASE ,legacy_format=_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.save_pretrained(_SCREAMING_SNAKE_CASE )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_snake_case = tokenizer_r.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = tokenizer_p.from_pretrained(_SCREAMING_SNAKE_CASE )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
shutil.rmtree(_SCREAMING_SNAKE_CASE )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def _lowercase ( self ) -> Dict:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] ,250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] ,250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] ,250_020 )
def _lowercase ( self ) -> Tuple:
_snake_case = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
self.assertIn(_SCREAMING_SNAKE_CASE ,self.tokenizer.all_special_ids )
_snake_case = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
_snake_case = self.tokenizer.decode(_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
_snake_case = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> List[Any]:
_snake_case = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] ,_SCREAMING_SNAKE_CASE )
_snake_case = 10
_snake_case = self.tokenizer(_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,_SCREAMING_SNAKE_CASE )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
def _lowercase ( self ) -> Optional[int]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) ,[250_026, 250_001] )
def _lowercase ( self ) -> str:
_snake_case = tempfile.mkdtemp()
_snake_case = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = MBartTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_SCREAMING_SNAKE_CASE )
@require_torch
def _lowercase ( self ) -> Dict:
_snake_case = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,return_tensors="pt" )
_snake_case = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase ( self ) -> Optional[int]:
_snake_case = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=len(self.expected_src_tokens ) ,return_tensors="pt" ,)
_snake_case = shift_tokens_right(batch["labels"] ,self.tokenizer.pad_token_id )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertEqual((2, 14) ,batch.input_ids.shape )
self.assertEqual((2, 14) ,batch.attention_mask.shape )
_snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,_SCREAMING_SNAKE_CASE )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase ( self ) -> str:
_snake_case = self.tokenizer(self.src_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=3 ,return_tensors="pt" )
_snake_case = self.tokenizer(
text_target=self.tgt_text ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=10 ,return_tensors="pt" )
_snake_case = targets["input_ids"]
_snake_case = shift_tokens_right(_SCREAMING_SNAKE_CASE ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _lowercase ( self ) -> Any:
_snake_case = self.tokenizer._build_translation_inputs(
"A test" ,return_tensors="pt" ,src_lang="en_XX" ,tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) ,{
# A, test, EOS, en_XX
"input_ids": [[62, 3_034, 2, 250_004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} ,)
| 142 | 1 |
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law, on any two given electrical values, which can be voltage, current,
    and resistance, and then in a Python dict return name/value pair of the zero value.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=-1.5, resistance=2)
    {'voltage': -3.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
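
# Example usage beyond the doctests above (hypothetical values): solving for
# resistance given a measured voltage and current:
#   >>> ohms_law(voltage=230, current=10, resistance=0)
#   {'resistance': 23.0}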
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a pair of `DataLoader`s for the `glue` MRPC dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
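
# For intuition, a minimal hand-rolled sketch of the same gradient accumulation
# pattern in plain PyTorch (hypothetical, not part of this script): scale each
# micro-batch loss by 1/N and step the optimizer every N micro-batches.
#
#   accumulation_steps = 4
#   for step, batch in enumerate(train_dataloader):
#       loss = model(**batch).loss / accumulation_steps
#       loss.backward()
#       if (step + 1) % accumulation_steps == 0:
#           optimizer.step()
#           optimizer.zero_grad()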
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 122 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__A : Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
__A : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__A : Dict = None
if self.use_token_type_ids:
__A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__A : Optional[Any] = None
__A : Union[str, Any] = None
__A : Optional[int] = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__A : List[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
__A : str = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Union[str, Any] = TFDPRContextEncoder(config=_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase)
__A : Tuple = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = TFDPRQuestionEncoder(config=_UpperCAmelCase)
__A : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase)
__A : Optional[int] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase)
__A : int = model(_UpperCAmelCase)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = TFDPRReader(config=_UpperCAmelCase)
__A : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,(
__A
) ,
) : int = config_and_inputs
__A : Dict = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Union[str, Any] = TFDPRContextEncoder.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Optional[Any] = TFDPRContextEncoder.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : str = TFDPRQuestionEncoder.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Optional[Any] = TFDPRReader.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4)) | 190 |
'''simple docstring'''
import argparse

JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str) -> None:
    """Update the stable version and the version table in the doc's custom.js file."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
update_custom_js(args.version) | 190 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05,
        use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024,
        dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False,
        num_experts=128, expert_capacity=64,
        encoder_sparse_step=4, decoder_sparse_step=4,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001,
        second_expert_policy="all", normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        ) | 81 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
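
# Worked example (hypothetical values): with batch dims (2, 3), flat index 4
# unravels to (1, 1), since 4 == 1 * 3 + 1:
#   >>> _flat_idx_to_idx(4, (2, 3))
#   (1, 1)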
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """
    Produce an ordered list of slice tuples that, applied in sequence to a tensor
    with shape `dims`, covers every element in the inclusive flat range
    [start, end] while using as few slices as possible.
    """
    # start_edges/end_edges indicate whether, from each dimension inward, the
    # start/end index sits at the top/bottom edge of the corresponding subtree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
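# Illustration: on a tensor with dims (3, 4), the inclusive range from index
# (0, 2) to (2, 1) is not a single rectangle; the function returns an ordered
# list of slice tuples (the ragged first row, the full middle row, and the
# ragged last row) whose concatenation covers exactly that range.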
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Slice the flat range [flat_start, flat_end) out of the first no_batch_dims dims of t."""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `chunk_size`-sized slices of the flattened batch dimensions
    of `inputs` and reassemble the results, trading compute for memory.
    """
    if not (len(inputs) > 0):
        raise ValueError('Must provide at least one input')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('Not supported')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
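# A minimal usage sketch (hypothetical layer and shapes): run a module over the
# flattened 2 x 256 batch dims in chunks of 64 instead of all at once:
#
#   def layer(x=None):
#       return {'out': x * 2}
#
#   x = torch.rand(2, 256, 8)
#   out = chunk_layer(layer, {'x': x}, chunk_size=64, no_batch_dims=2)
#   # out['out'] has shape (2, 256, 8) and equals layer(x=x)['out']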
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('Tuning chunk size...')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
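# A minimal usage sketch (hypothetical module): the tuner probes power-of-two
# chunk sizes with a binary search, treats a RuntimeError (e.g. CUDA OOM) as
# "too large", and caches the winner until the argument shapes change:
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       representative_fn=lambda x, chunk_size=None: chunk_layer(
#           layer, {'x': x}, chunk_size=chunk_size, no_batch_dims=2
#       ),
#       args=(x,),
#       min_chunk_size=16,
#   )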
| 204 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
        'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
        'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
        'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding='utf-8', check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        job_name = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results['train_runtime']
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 11 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('text-question-answering')
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, 'What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='What did Hugging Face do in April 2021?')
        self.assertEqual(result, 'launched the BigScience Research Workshop')
| 11 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png')
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose', from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5', controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params['controlnet'] = controlnet_params

        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png')
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f'output_slice: {output_slice}')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 81 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
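# Lazy-import pattern: each tokenizer submodule is only registered in
# _import_structure when its optional backend (sentencepiece / tokenizers) is
# installed, and _LazyModule defers the actual imports until first attribute use.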
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 | 1 |
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08497,
            'b': 0.01492,
            'c': 0.02202,
            'd': 0.04253,
            'e': 0.11162,
            'f': 0.02228,
            'g': 0.02015,
            'h': 0.06094,
            'i': 0.07546,
            'j': 0.00153,
            'k': 0.01292,
            'l': 0.04025,
            'm': 0.02406,
            'n': 0.06749,
            'o': 0.07507,
            'p': 0.01929,
            'q': 0.00095,
            'r': 0.07587,
            's': 0.06327,
            't': 0.09356,
            'u': 0.02758,
            'v': 0.00978,
            'w': 0.02560,
            'x': 0.00150,
            'y': 0.01994,
            'z': 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ''

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
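# Usage sketch: 'dro aesmu lbygx pyh' is 'the quick brown fox' Caesar-shifted
# by 10, so decrypt_caesar_with_chi_squared('dro aesmu lbygx pyh') returns
# (10, <lowest chi-squared score>, 'the quick brown fox'). Each candidate shift
# is scored with chi^2 = sum((observed - expected)**2 / expected) over the
# letters of its decryption; the shift whose letter counts best match English
# frequencies has the smallest score and wins.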
| 356 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the number of distinct ways to climb a staircase of
    `number_of_steps` steps, taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 | 0 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained('google/umt5-base')

    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f'{tmpdirname}/t5_test.onnx', export_params=True, opset_version=9, input_names=['input_ids', 'decoder_input_ids'], )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            'head_mask': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks['decoder_head_mask'] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)

            out = model.generate(
                config_and_inputs[1]['input_ids'], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMT5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained('google/umt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small', use_fast=False, legacy=False)
        input_text = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        input_ids = tokenizer(input_text, return_tensors='pt', padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
| 209 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
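    # e.g. a single sequence is encoded as `tokens <sep> <cls>` and a pair as
    # `tokens_0 <sep> tokens_1 <sep> <cls>`: XLNet appends its special tokens
    # at the end of the sequence rather than the start.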
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 209 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/xglm-564M': 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f'<madeupword{i}>' for i in range(self.num_madeup_words)]

        kwargs['additional_special_tokens'] = kwargs.get('additional_special_tokens', [])
        kwargs['additional_special_tokens'] += [
            word for word in madeup_words if word not in kwargs['additional_special_tokens']
        ]

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        sp_size = len(self.sp_model)
        madeup_words_dict = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_dict)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 203 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
    parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
    parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
    parser.add_argument(
        '--out-file', '-o', metavar='eval.json', help='Write accuracy metrics to file (default is stdout).')
    parser.add_argument(
        '--na-prob-file', '-n', metavar='na_prob.json', help='Model estimates of probability of no answer.')
    parser.add_argument(
        '--na-prob-thresh', '-t', type=float, default=1.0, help='Predict "" if no-answer probability exceeds this (default = 1.0).', )
    parser.add_argument(
        '--out-image-dir', '-p', metavar='out_images', default=None, help='Save precision-recall curves to directory.')
    parser.add_argument('--verbose', '-v', action='store_true')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid_to_has_ans[qa['id']] = bool(qa['answers']['text'])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
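# Worked example: gold 'blue cat sat' vs. prediction 'blue cat' share 2 tokens,
# so precision = 2/2 = 1.0, recall = 2/3, and
# compute_f1('blue cat sat', 'blue cat') == 2 * 1.0 * (2/3) / (1.0 + 2/3) == 0.8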
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article['paragraphs']:
            for qa in p['qas']:
                qid = qa['id']
                gold_answers = [t for t in qa['answers']['text'] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['']
                if qid not in preds:
                    print(f'Missing prediction for {qid}')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values()) / total),
                ('f1', 100.0 * sum(f1_scores.values()) / total),
                ('total', total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ('total', total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f'{prefix}_{k}'] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
    plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score', )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)', )
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_f1, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('Model probability of no-answer')
    plt.ylabel('Proportion of dataset')
    plt.title(f'Histogram of no-answer probability: {name}')
    plt.savefig(os.path.join(image_dir, f'na_prob_hist_{name}.png'))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
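# The sweep starts from the score obtained by answering nothing (num_no_ans
# correct) and visits questions in order of increasing no-answer probability,
# adding each question's score as it becomes "answered"; the probability at
# which the running total peaks is returned as the best threshold.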
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
a__ , a__: str = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__ , a__: Optional[int] = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[Any] = best_exact
a__: Dict = exact_thresh
a__: Optional[int] = best_fa
a__: str = fa_thresh
def __a ( ) ->int:
with open(OPTS.data_file ) as f:
a__: Tuple = json.load(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = dataset_json['data']
with open(OPTS.pred_file ) as f:
a__: Dict = json.load(_SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
a__: Dict = json.load(_SCREAMING_SNAKE_CASE )
else:
a__: Optional[Any] = {k: 0.0 for k in preds}
a__: List[Any] = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False
a__: Optional[int] = [k for k, v in qid_to_has_ans.items() if v]
a__: Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v]
a__ , a__: Optional[Any] = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
a__: Dict = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
a__: str = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_ans_qids:
a__: List[str] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'HasAns' )
if no_ans_qids:
a__: Optional[Any] = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) )
if __name__ == "__main__":
lowercase__ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
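# Hypothetical usage sketch (added, not part of the original script): exercises the
# reconstructed find_best_thresh on hand-made inputs. All qids and values below are
# invented for illustration.
def _demo_find_best_thresh():
    toy_preds = {"q1": "some answer", "q2": ""}  # q2 predicted as no-answer
    toy_scores = {"q1": 1.0, "q2": 0.0}          # per-qid exact-match scores
    toy_na_probs = {"q1": 0.1, "q2": 0.9}        # model's no-answer probabilities
    toy_has_ans = {"q1": True, "q2": False}      # gold: does the qid have an answer?
    best, thresh = find_best_thresh(toy_preds, toy_scores, toy_na_probs, toy_has_ans)
    # both questions end up scored correctly, so this returns (100.0, 0.1):
    # the best accuracy and the na_prob cut-off that achieves it
    return best, thresh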
| 203 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
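# Hypothetical usage sketch (added, not part of the original module): exercises the
# reconstructed BartTokenizerFast. "facebook/bart-base" is a real checkpoint id; the
# first call downloads it from the Hub.
if __name__ == "__main__":
    tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
    ids = tok("Hello world")["input_ids"]
    # decode() restores the text together with the special tokens added by
    # build_inputs_with_special_tokens above
    print(tok.decode(ids))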
| 27 | """simple docstring"""
def solution(limit=28123):
    """Project Euler 23: sum of all positive integers <= limit that cannot be
    written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
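# Worked check (added for illustration): 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), so 24 = 12 + 12 is the smallest integer expressible
# as a sum of two abundant numbers, and every integer below 24 contributes.
assert solution(23) == sum(range(1, 24))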
| 177 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into `partitions` contiguous, 1-indexed byte ranges;
    the last partition absorbs any remainder."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
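# Usage sketch (added for illustration): splitting 100 bytes across 4 downloaders
# yields four equal ranges; with 3 partitions the last range absorbs the remainder.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(100, 3) == ["1-33", "34-66", "67-100"]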
| 364 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
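# Usage sketch (added for illustration): on demo graph 0 (two cycles joined by a
# chain) the bridges are exactly the edges whose removal disconnects the graph.
assert compute_bridges(get_demo_graph(0)) == [(3, 4), (2, 3), (2, 5)]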
| 3 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width when providing images to
        DetrImageProcessor, assuming do_resize is set to True with a scalar size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) | 145 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
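# Hypothetical usage sketch (added, not part of the test file): embedding a question
# with the public DPR checkpoint exercised in the integration test above.
def _demo_dpr_question_embedding():
    import tensorflow as tf
    from transformers import TFDPRQuestionEncoder

    model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    input_ids = tf.constant([[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])
    return model(input_ids)[0]  # (1, 768) dense question embedding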
| 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
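# Hypothetical usage sketch (added, not part of the original module): instantiating
# the config with defaults and showing the depth_multiplier guard.
if __name__ == "__main__":
    config = MobileNetV1Config()
    print(config.image_size, config.depth_multiplier)  # 224 1.0
    try:
        MobileNetV1Config(depth_multiplier=0)
    except ValueError as err:
        print(err)  # depth_multiplier must be greater than zero.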
| 233 | 0 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
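# Worked example (added for illustration): circularly convolving [2, 1, 2, -1] with
# [1, 2, 3, 4] gives [10, 10, 6, 14]; e.g. the first output sample is
# 2*1 + 1*4 + 2*3 + (-1)*2 = 10.
assert CircularConvolution().circular_convolution() == [10, 10, 6, 14]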
| 219 | import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
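# Illustration (added, not part of the original test): the expected parse of the
# success args above — values keep their literal types, so "False" becomes the
# boolean False and "50.5" a float.
if __name__ == "__main__":
    print(_convert_nargs_to_dict(MockLaunchConfig.success_training_script_args))
    # -> {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3,
    #     'learning_rate': 5e-05, 'max_steps': 50.5}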
| 219 | 1 |
"""simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
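# Hypothetical usage sketch (added, not from the original file): this table is the
# kind of mapping a setup.py uses to resolve a pinned requirement string from a
# bare package name; deps_list is a minimal helper written for illustration.
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

assert deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]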
| 371 | """simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _ = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
def test_accelerator_none(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
dummy_obj = None
# This should work
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
self.assertTrue(dummy_obj is None)
def test_is_accelerate_prepared(self):
accelerator = Accelerator()
model, optimizer, scheduler, train_dl, valid_dl = create_components()
dummy_obj = [1, 2, 3]
# This should work
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
self.assertEqual(
getattr(dummy_obj, "_is_accelerate_prepared", False), False, "Dummy object should not have `_is_accelerate_prepared` set", )
self.assertEqual(
getattr(model, "_is_accelerate_prepared", False), True, "Model is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
getattr(optimizer, "_is_accelerate_prepared", False), True, "Optimizer is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
getattr(scheduler, "_is_accelerate_prepared", False), True, "Scheduler is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
getattr(train_dl, "_is_accelerate_prepared", False), True, "Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`", )
self.assertEqual(
getattr(valid_dl, "_is_accelerate_prepared", False), True, "Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`", )
@slow
@require_bnb
def test_accelerator_bnb(self):
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map={"": 0}, )
accelerator = Accelerator()
# This should work
model = accelerator.prepare(model)
@slow
@require_bnb
def test_accelerator_bnb_cpu_error(self):
from transformers import AutoModelForCausalLM
accelerator = Accelerator()
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = "cpu"
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
# This should not work and get value error
with self.assertRaises(ValueError):
model = accelerator.prepare(model)
@slow
@require_bnb
@require_multi_gpu
def test_accelerator_bnb_multi_device(self):
from transformers import AutoModelForCausalLM
PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", )
model.tie_weights()
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
accelerator = Accelerator()
# This should not work and get value error
with self.assertRaises(ValueError):
_ = accelerator.prepare(model)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def test_accelerator_bnb_multi_device_no_distributed(self):
from transformers import AutoModelForCausalLM
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", )
device_map = infer_auto_device_map(model)
device_map["lm_head"] = 1
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map=device_map, )
accelerator = Accelerator()
# This should work
_ = accelerator.prepare(model)
@require_cuda
def test_accelerator_cpu_flag_prepare(self):
model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
accelerator = Accelerator(cpu=True)
_ = accelerator.prepare(optimizer)
| 126 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
# Checks if the entire collection has been sorted
if len(collection) <= 1 or n <= 1:
return
insert_next(collection, n - 1)
rec_insertion_sort(collection, n - 1)
def insert_next(collection: list, index: int):
# Checks order between adjacent elements
if index >= len(collection) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
collection[index - 1], collection[index] = (
collection[index],
collection[index - 1],
)
insert_next(collection, index + 1)
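# Example trace: rec_insertion_sort([3, 1, 2], 3) leaves the tail untouched on the first pass,
# then insert_next swaps 3 with 1 and bubbles it rightward past 2, leaving [1, 2, 3].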
if __name__ == "__main__":
numbers = input("Enter integers separated by spaces: ")
number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 92 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 92 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = FunnelTokenizer
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
cls_token_type_id: int = 2
def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
super().__init__(
vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase", do_lower_case) != do_lower_case
or normalizer_state.get("strip_accents", strip_accents) != strip_accents
or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
normalizer_state["lowercase"] = do_lower_case
normalizer_state["strip_accents"] = strip_accents
normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
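# e.g. with cls_token_type_id == 2, the pair ([a, b], [c]) yields [2, 0, 0, 0, 1, 1]
# (CLS, a, b, SEP from the first segment; c, SEP from the second)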
def save_vocabulary(self, save_directory, filename_prefix=None):
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
| 363 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
"""Binarize a greyscale image around its mean: pixels brighter than the mean become 255, the rest 0."""
height, width = image.size
mean = 0
pixels = image.load()
for i in range(width):
for j in range(height):
pixel = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(width):
for i in range(height):
pixels[i, j] = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save('''output_image_path''')
| 178 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
expressions = test_results.split(" ")
failed = 0
success = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(expressions):
if "failed" in expression:
failed += int(expressions[i - 1])
if "passed" in expression:
success += int(expressions[i - 1])
return failed, success, time_spent
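# e.g. handle_test_results("4 failed, 96 passed in 0:02:15") returns (4, 96, "0:02:15")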
def extract_first_line_failure(failures_short_lines):
failures = {}
file = None
in_error = False
for line in failures_short_lines.split("\n"):
if re.search(r"_ \[doctest\]", line):
in_error = True
file = line.split(" ")[2]
elif in_error and not line.split(" ")[0].isdigit():
failures[file] = line
in_error = False
return failures
class Message:
def __init__(self, title: str, doc_test_results: Dict):
self.title = title
self._time_spent = doc_test_results["time_spent"].split(",")[0]
self.n_success = doc_test_results["success"]
self.n_failures = doc_test_results["failures"]
self.n_tests = self.n_success + self.n_failures
# Failures and success of the modeling tests
self.doc_test_results = doc_test_results
# set by post(); post_reply() refuses to run before a post has been made
self.thread_ts = None
@property
def time(self) -> str:
time_spent = [self._time_spent]
total_secs = 0
for time in time_spent:
time_parts = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(time_parts) == 1:
time_parts = [0, 0, time_parts[0]]
hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
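# e.g. a _time_spent of "1:02:30" is rendered as "1h2m30s"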
@property
def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
def category_failures(self) -> Dict:
line_length = 40
category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
report = ""
for category, failures in category_failures.items():
if len(failures) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(failures)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def payload(self) -> str:
blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(blocks)
@staticmethod
def error_out():
payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": payload}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload, )
def post(self):
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(self.payload)}))
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
self.thread_ts = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
def get_reply_blocks(self, job_name, job_link, failures, text):
failures_text = ""
for key, value in failures.items():
value = value[:200] + " [Truncated]" if len(value) > 250 else value
failures_text += f"*{key}*\n_{value}_\n\n"
title = job_name
content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
content["accessory"] = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def post_reply(self):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
job_link = self.doc_test_results.pop("job_link")
self.doc_test_results.pop("failures")
self.doc_test_results.pop("success")
self.doc_test_results.pop("time_spent")
sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result["failures"]):
text = f'*Num failures* :{len(job_result["failed"])} \n'
failures = job_result["failures"]
blocks = self.get_reply_blocks(job, job_link, failures, text=text)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )
time.sleep(1)
def get_job_links():
run_id = os.environ["GITHUB_RUN_ID"]
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
result = requests.get(url).json()
jobs = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}").json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
return jobs
except Exception as e:
print("Unknown error, could not fetch links.", e)
return {}
def retrieve_artifact(name: str):
_artifact = {}
if os.path.exists(name):
files = os.listdir(name)
for file in files:
try:
with open(os.path.join(name, file), encoding="utf-8") as f:
_artifact[file.split(".")[0]] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
return _artifact
def retrieve_available_artifacts():
class Artifact:
def __init__(self, name: str):
self.name = name
self.paths = []
def __str__(self):
return self.name
def add_path(self, path: str):
self.paths.append({"name": self.name, "path": path})
_available_artifacts = {}
directories = filter(os.path.isdir, os.listdir())
for directory in directories:
artifact_name = directory
if artifact_name not in _available_artifacts:
_available_artifacts[artifact_name] = Artifact(artifact_name)
_available_artifacts[artifact_name].add_path(directory)
return _available_artifacts
if __name__ == "__main__":
github_actions_job_links = get_job_links()
available_artifacts = retrieve_available_artifacts()
docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
artifact = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
failed, success, time_spent = handle_test_results(artifact["stats"])
doc_test_results["failures"] = failed
doc_test_results["success"] = success
doc_test_results["time_spent"] = time_spent[1:-1] + ", "
all_failures = extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
line = line.replace("FAILED ", "")
line = line.split()[0].replace("\n", "")
if "::" in line:
file_path, test = line.split("::")
else:
file_path, test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
failure = all_failures[test] if test in all_failures else "N/A"
doc_test_results[category]["failures"][test] = failure
break
message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply() | 312 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" ,"" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(user_agent, dict):
ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
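# e.g. http_user_agent("my-app/1.0") might produce
# "diffusers/<version>; python/3.10.12; session_id/<hex>; torch/<version>; my-app/1.0"
# (the exact segments depend on which backends are installed)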
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def create_model_card(args, model_name):
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`.")
if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
return
hub_token = args.hub_token if hasattr(args, "hub_token") else None
repo_name = get_full_repo_name(model_name, token=hub_token)
model_card = ModelCard.from_template(
card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )
card_path = os.path.join(args.output_dir, "README.md")
model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
if resolved_file is None or commit_hash is not None:
return commit_hash
resolved_file = str(Path(resolved_file).as_posix())
search = re.search(r"snapshots/([^/]+)/", resolved_file)
if search is None:
return None
commit_hash = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
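# e.g. ".../models--org--repo/snapshots/0123abcd/config.json" -> "0123abcd"
# (provided "0123abcd" matches REGEX_COMMIT_HASH; otherwise None is returned)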
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
if new_cache_dir is None:
new_cache_dir = DIFFUSERS_CACHE
if old_cache_dir is None:
old_cache_dir = old_diffusers_cache
old_cache_dir = Path(old_cache_dir).expanduser()
new_cache_dir = Path(new_cache_dir).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*"):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
new_blob_path.parent.mkdir(parents=True, exist_ok=True)
os.replace(old_blob_path, new_blob_path)
try:
os.symlink(new_blob_path, old_blob_path)
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
cache_version = 0
else:
with open(cache_version_file) as f:
try:
cache_version = int(f.read())
except ValueError:
cache_version = 0
if cache_version < 1:
old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
if variant is not None:
splits = weights_name.split(".")
splits = splits[:-1] + [variant] + splits[-1:]
weights_name = ".".join(splits)
return weights_name
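# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"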
def _get_model_file(pretrained_model_name_or_path, *,
weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path):
return pretrained_model_name_or_path
elif os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
# Load from a PyTorch checkpoint
model_file = os.path.join(pretrained_model_name_or_path, weights_name)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
):
try:
model_file = hf_hub_download(
pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
warnings.warn(
f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
return model_file
except:  # noqa: E722
warnings.warn(
f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
try:
# 2. Load model file as usual
model_file = hf_hub_download(
pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' ) | 312 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
FlaxCrossAttnDownBlock2D,
FlaxCrossAttnUpBlock2D,
FlaxDownBlock2D,
FlaxUNetMidBlock2DCrossAttn,
FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
sample_size: int = 32
in_channels: int = 4
out_channels: int = 4
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
only_cross_attention: Union[bool, Tuple[bool]] = False
block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
layers_per_block: int = 2
attention_head_dim: Union[int, Tuple[int]] = 8
num_attention_heads: Optional[Union[int, Tuple[int]]] = None
cross_attention_dim: int = 1280
dropout: float = 0.0
use_linear_projection: bool = False
dtype: jnp.dtype = jnp.float32
flip_sin_to_cos: bool = True
freq_shift: int = 0
use_memory_efficient_attention: bool = False
def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
# init input tensors
sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
sample = jnp.zeros(sample_shape, dtype=jnp.float32)
timesteps = jnp.ones((1,), dtype=jnp.int32)
encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
def setup(self):
block_out_channels = self.block_out_channels
time_embed_dim = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = self.num_attention_heads or self.attention_head_dim
# input
self.conv_in = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
self.time_proj = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
only_cross_attention = self.only_cross_attention
if isinstance(only_cross_attention, bool):
only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
# down
down_blocks = []
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
if down_block_type == "CrossAttnDownBlock2D":
down_block = FlaxCrossAttnDownBlock2D(
in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
down_block = FlaxDownBlock2D(
in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(down_block)
self.down_blocks = down_blocks
# mid
self.mid_block = FlaxUNetMidBlock2DCrossAttn(
in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
# up
up_blocks = []
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
is_final_block = i == len(block_out_channels) - 1
if up_block_type == "CrossAttnUpBlock2D":
up_block = FlaxCrossAttnUpBlock2D(
in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
else:
up_block = FlaxUpBlock2D(
in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
up_blocks.append(up_block)
prev_output_channel = output_channel
self.up_blocks = up_blocks
# out
self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.conv_out = nn.Conv(
self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False, ):
# 1. time
if not isinstance(timesteps, jnp.ndarray):
timesteps = jnp.array([timesteps], dtype=jnp.int32)
elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
timesteps = timesteps.astype(dtype=jnp.float32)
timesteps = jnp.expand_dims(timesteps, 0)
t_emb = self.time_proj(timesteps)
t_emb = self.time_embedding(t_emb)
# 2. pre-process
sample = jnp.transpose(sample, (0, 2, 3, 1))
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for down_block in self.down_blocks:
if isinstance(down_block, FlaxCrossAttnDownBlock2D):
sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
else:
sample, res_samples = down_block(sample, t_emb, deterministic=not train)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(up_block, FlaxCrossAttnUpBlock2D):
sample = up_block(
sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
else:
sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
# 6. post-process
sample = self.conv_norm_out(sample)
sample = nn.silu(sample)
sample = self.conv_out(sample)
sample = jnp.transpose(sample, (0, 3, 1, 2))
if not return_dict:
return (sample,)
return FlaxUNet2DConditionOutput(sample=sample)
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpt2,
recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
set_seed(3)
# generate train_data and objective_set
train_data, objective_set = generate_datasets(
context_len, data_file, number=size_objective_set, min_len=1026, trim=trim)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load pretrained model
model = load_gpt2("gpt2").to(device)
print("computing perplexity on objective set")
orig_perp = compute_perplexity(model, objective_set, context_len).item()
print("perplexity on objective set:", orig_perp)
# collect igf pairs and save to file demo.jbl
collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner(
secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
set_seed(42)
# Load pre-trained model
model = GPT2LMHeadModel.from_pretrained("gpt2")
# Initialize secondary learner to use embedding weights of model
secondary_learner = SecondaryLearner(model)
# Train secondary learner
secondary_learner = train_secondary_learner(
secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune(
model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
num_train_epochs = max_steps // (len(train_dataloader)) + 1
global_step = 0
context = torch.zeros((1, context_len), dtype=torch.long, device=device)
model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
model.train()
if secondary_learner is not None:
secondary_learner.to(device)
secondary_learner.eval()
contexts = []
examples = 0
observed_qs = []
test_perps = []
# Compute the performance of the transformer model at the beginning
real_perp = compute_perplexity(model, test_dataset, context_len)
test_perps.append(real_perp)
print("Test perplexity, step", global_step, ":", real_perp)
for epoch in range(int(num_train_epochs)):
for step, example in enumerate(train_dataloader):
torch.cuda.empty_cache()
start = random.randint(0, example.size(2) - context_len - 1)
context = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
outputs = model(context, labels=context)
do_backprop = True
if secondary_learner is not None:
predicted_q = secondary_learner.forward(
torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0))[0].item()
observed_qs.append(float(predicted_q))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
threshold = -1
if predicted_q < threshold:
do_backprop = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
lm_loss = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
examples = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
lm_optimizer.step()
lm_scheduler.step()  # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
real_perp = compute_perplexity(model, test_dataset, context_len)
test_perps.append(real_perp)
print("Test perplexity, step", global_step, ":", real_perp)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict(), finetuned_model_name)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
# Required parameters
parser.add_argument(
"--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", )
parser.add_argument(
"--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
parser.add_argument(
"--data_file", type=str, default=None, help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
), )
parser.add_argument(
"--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.", )
parser.add_argument(
"--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--context_len", default=32, type=int, help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
), )
parser.add_argument(
"--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
parser.add_argument(
"--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
parser.add_argument(
"--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
parser.add_argument(
"--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
parser.add_argument(
"--eval_interval", default=10, type=int, help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
), )
parser.add_argument(
"--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
parser.add_argument(
"--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
parser.add_argument(
"--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
parser.add_argument(
"--threshold", default=1.0, type=float, help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
), )
parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
parser.add_argument(
"--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__UpperCAmelCase , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
UpperCamelCase__ : List[str] = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
UpperCamelCase__ : Optional[int] = training_secondary_learner(
__UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
UpperCamelCase__ : Any = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCamelCase__ ,UpperCamelCase__ : Any = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1026 , trim=__UpperCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__UpperCAmelCase , secondary_learner=__UpperCAmelCase , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
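# Hypothetical command line for the script above (the script name is a placeholder;
# each flag maps to an add_argument call in main()):
#
#   python run_igf.py \
#       --igf_data_file igf_context_pairs.jbl \
#       --output_dir ./igf_output \
#       --tokenizer_name gpt2 \
#       --context_len 32 --max_steps 1000 --batch_size 16 --seed 42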
| 201 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the snippet does not preserve the original class name; "SimpleImageProcessor"
# is a descriptive placeholder. The processor resizes images down to the nearest
# multiple of `size_divisor` and rescales pixel values to [0, 1].
class SimpleImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('''Invalid image(s)''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
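# Hedged usage sketch for the processor above ("img.png" is a placeholder path and
# PIL is assumed to be available):
#
#   from PIL import Image
#   processor = SimpleImageProcessor(size_divisor=32)
#   batch = processor.preprocess(Image.open("img.png"), return_tensors="np")
#   batch["pixel_values"][0].shape  # channels-first, H and W rounded down to multiples of 32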
| 201 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ['col_1', 'col_2'])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{'col_1': 1}, {'col_2': 'x'}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {'col_1': 1})
        self.assertDictEqual(dset[1], {'col_1': None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{'col_1': []}, {'col_1': [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features['col_1'], Sequence(Value('int64')))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
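# Quick demonstration of the schema behavior the tests above pin down: with
# Dataset.from_list, the first record fixes the columns, and later records are
# padded with None or stripped to fit that schema.
if __name__ == "__main__":
    demo = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(demo[0])  # {'col_1': 1}
    print(demo[1])  # {'col_1': None}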
| 109 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[Any] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'bird'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
a__ = pipe.prepare_image_inputs([canny_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'Chef in the kitchen'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
a__ = pipe.prepare_image_inputs([pose_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
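# Why the reshape used in both tests flattens the per-device axis: pmap-style outputs
# have shape (num_devices, per_device_batch, H, W, C); merging the first two axes
# yields a flat batch. A standalone helper mirroring that exact line:
def flatten_device_axis(images):
    # (num_devices, per_device_batch, H, W, C) -> (num_devices * per_device_batch, H, W, C)
    return images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])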
| 109 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
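# The lazy-init pattern above defers importing heavy submodules until an attribute is
# first touched. A minimal standalone illustration of the same idea (the class below
# is illustrative only, not part of transformers):
import importlib


class LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # Only runs for attributes not found on the instance: import on first use.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


if __name__ == "__main__":
    lazy_json = LazyAttr("json")      # nothing imported yet
    print(lazy_json.dumps({"a": 1}))  # the json module is imported here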
| 34 |
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it lies inside the grid, is unvisited, and is set
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over the 8 cells surrounding (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
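# Example run for the Graph class above: two 8-connected groups of 1s -> 2 islands.
if __name__ == "__main__":
    example_grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, example_grid).count_islands())  # 2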
| 2 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    # Unit-cost shortest path on a grid (Dijkstra with all edge weights equal to 1;
    # cells holding 1 are traversable).
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
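# Worked example for dijkstra above (runs only when executed as a script): on a 3x3
# grid where 1 marks traversable cells, the path threads through the middle cell.
if __name__ == "__main__":
    example_grid = np.array([[1, 1, 1], [0, 1, 0], [1, 1, 1]])
    dist, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(dist)  # 4.0
    print(path)  # [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]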
| 357 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ) -> None:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
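# The nested-config pattern above in miniature: a parent config embeds child configs
# and re-serializes them in to_dict(). Minimal sketch with hypothetical classes (the
# real ones come from transformers):
class SubConfig:
    def __init__(self, hidden_size=32):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class ComposedConfig:
    model_type = "composed"

    def __init__(self, backbone_config=None, decoder_config=None):
        self.backbone_config = backbone_config or SubConfig(32)
        self.decoder_config = decoder_config or SubConfig(64)

    def to_dict(self):
        # serialize the children explicitly so the result is plain dicts all the way down
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


if __name__ == "__main__":
    print(ComposedConfig().to_dict())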
| 280 | 0 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the worldometers landing page and return its headline COVID-19 counters."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
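# The scraper above pairs headings with counters positionally via zip(); the same
# mechanics on a static snippet, with no network access needed:
if __name__ == "__main__":
    static_html = "<h1>Cases</h1><div class='maincounter-number'> 42 </div>"
    static_soup = BeautifulSoup(static_html, "html.parser")
    static_keys = static_soup.findAll("h1")
    static_values = static_soup.findAll("div", {"class": "maincounter-number"})
    print({k.text.strip(): v.text.strip() for k, v in zip(static_keys, static_values)})  # {'Cases': '42'}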
| 31 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a ( UpperCamelCase__):
"""simple docstring"""
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class _a :
"""simple docstring"""
def __init__( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: str=13 , __lowerCamelCase: Tuple=64 , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: List[Any]=[16, 48, 96] , __lowerCamelCase: Union[str, Any]=[1, 3, 6] , __lowerCamelCase: Tuple=[1, 2, 10] , __lowerCamelCase: int=[7, 3, 3] , __lowerCamelCase: Dict=[4, 2, 2] , __lowerCamelCase: int=[2, 1, 1] , __lowerCamelCase: Dict=[2, 2, 2] , __lowerCamelCase: List[str]=[False, False, True] , __lowerCamelCase: str=[0.0, 0.0, 0.0] , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1e-12 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Tuple=True , __lowerCamelCase: Union[str, Any]=2 , ):
'''simple docstring'''
UpperCamelCase__: Dict = parent
UpperCamelCase__: Union[str, Any] = batch_size
UpperCamelCase__: int = image_size
UpperCamelCase__: Dict = patch_sizes
UpperCamelCase__: Any = patch_stride
UpperCamelCase__: Optional[int] = patch_padding
UpperCamelCase__: Any = is_training
UpperCamelCase__: Dict = use_labels
UpperCamelCase__: List[str] = num_labels
UpperCamelCase__: Tuple = num_channels
UpperCamelCase__: int = embed_dim
UpperCamelCase__: int = num_heads
UpperCamelCase__: Dict = stride_kv
UpperCamelCase__: Optional[int] = depth
UpperCamelCase__: int = cls_token
UpperCamelCase__: Optional[Any] = attention_drop_rate
UpperCamelCase__: Tuple = initializer_range
UpperCamelCase__: Dict = layer_norm_eps
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: str = TFCvtModel(config=__lowerCamelCase )
UpperCamelCase__: str = model(__lowerCamelCase , training=__lowerCamelCase )
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
UpperCamelCase__: Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
UpperCamelCase__: List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict ):
'''simple docstring'''
UpperCamelCase__: int = self.num_labels
UpperCamelCase__: Tuple = TFCvtForImageClassification(__lowerCamelCase )
UpperCamelCase__: Tuple = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
UpperCamelCase__ = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = TFCvtModelTester(self )
UpperCamelCase__: int = TFCvtConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def UpperCAmelCase_ ( self: int ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: int = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(__lowerCamelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__: int = model_class(__lowerCamelCase )
UpperCamelCase__: Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__: List[str] = [*signature.parameters.keys()]
UpperCamelCase__: Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
def check_hidden_states_output(__lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str ):
UpperCamelCase__: Tuple = model_class(__lowerCamelCase )
UpperCamelCase__: Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
UpperCamelCase__: Optional[Any] = outputs.hidden_states
UpperCamelCase__: str = len(self.model_tester.depth )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase__ , UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__: int = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__: Union[str, Any] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__: List[Any] = TFCvtModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def lowerCAmelCase_ ( ):
UpperCamelCase__: Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_tf
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__: str = self.default_image_processor
UpperCamelCase__: List[str] = prepare_img()
UpperCamelCase__: Tuple = image_processor(images=__lowerCamelCase , return_tensors="tf" )
# forward pass
UpperCamelCase__: List[str] = model(**__lowerCamelCase )
# verify the logits
UpperCamelCase__: Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
UpperCamelCase__: Optional[Any] = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
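# The spatial-size rule used by the model checks above: a convolution with kernel k,
# stride s and padding p maps height H to floor((H + 2p - k) / s) + 1. For the first
# stage defaults in this tester (H=64, k=7, s=4, p=2): floor((64 + 4 - 7) / 4) + 1 = 16.
def expected_conv_size(height: int, kernel: int, stride: int, padding: int) -> int:
    return floor((height + 2 * padding - kernel) / stride) + 1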
| 149 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results: clear out any stale checkpoint files before writing new ones
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Print a 2D tensor, one logged line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : List[Any] , snake_case : str , snake_case : Dict=True , snake_case : str=True , snake_case : Dict=None , snake_case : Tuple=False )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
UpperCAmelCase__ : Dict = torch.zeros(snake_case , snake_case ).to(args.device )
UpperCAmelCase__ : Dict = torch.zeros(snake_case , snake_case ).to(args.device )
if head_mask is None:
UpperCAmelCase__ : List[Any] = torch.ones(snake_case , snake_case ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Dict = 0.0
UpperCAmelCase__ : List[str] = 0.0
for step, inputs in enumerate(tqdm(snake_case , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
UpperCAmelCase__ : Union[str, Any] = tuple(t.to(args.device ) for t in inputs )
(UpperCAmelCase__ ) : Optional[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
UpperCAmelCase__ : str = model(snake_case , labels=snake_case , head_mask=snake_case )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
UpperCAmelCase__ : Union[str, Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case ):
UpperCAmelCase__ : List[Any] = entropy(attn.detach() , snake_case )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
UpperCAmelCase__ : int = 2
UpperCAmelCase__ : str = torch.pow(torch.pow(snake_case , snake_case ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
UpperCAmelCase__ : List[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(snake_case )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(snake_case )
logger.info("Head ranked by importance scores" )
UpperCAmelCase__ : Union[str, Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
UpperCAmelCase__ : Dict = torch.arange(
head_importance.numel() , device=args.device )
UpperCAmelCase__ : Any = head_ranks.view_as(snake_case )
print_ad_tensor(snake_case )
return attn_entropy, head_importance, total_loss
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : List[Any] )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = compute_heads_importance(snake_case , snake_case , snake_case , compute_entropy=snake_case )
UpperCAmelCase__ : Union[str, Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , snake_case , original_score * args.masking_threshold )
UpperCAmelCase__ : List[str] = torch.ones_like(snake_case )
UpperCAmelCase__ : List[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
UpperCAmelCase__ : int = original_score
while current_score >= original_score * args.masking_threshold:
UpperCAmelCase__ : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
UpperCAmelCase__ : Optional[Any] = float("Inf" )
UpperCAmelCase__ : Dict = head_importance.view(-1 ).sort()[1]
if len(snake_case ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
UpperCAmelCase__ : Any = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
UpperCAmelCase__ : int = new_head_mask.view(-1 )
UpperCAmelCase__ : int = 0.0
UpperCAmelCase__ : List[Any] = new_head_mask.view_as(snake_case )
UpperCAmelCase__ : List[Any] = new_head_mask.clone().detach()
print_ad_tensor(snake_case )
# Compute metric and head importance again
UpperCAmelCase__ : Union[str, Any] = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , head_mask=snake_case )
UpperCAmelCase__ : Any = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , snake_case , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(snake_case )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : int , snake_case : Optional[Any] , snake_case : List[str] )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = datetime.now()
UpperCAmelCase__ : Optional[int] = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case )
UpperCAmelCase__ : List[Any] = 1 / loss
UpperCAmelCase__ : List[Any] = datetime.now() - before_time
UpperCAmelCase__ : Optional[Any] = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : Any = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case , snake_case ):
UpperCAmelCase__ : int = [
v,
]
assert sum(len(snake_case ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case )
UpperCAmelCase__ : Any = sum(p.numel() for p in model.parameters() )
UpperCAmelCase__ : Dict = datetime.now()
UpperCAmelCase__ : str = compute_heads_importance(
snake_case , snake_case , snake_case , compute_entropy=snake_case , compute_importance=snake_case , head_mask=snake_case , actually_pruned=snake_case , )
UpperCAmelCase__ : str = 1 / loss
UpperCAmelCase__ : Any = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , snake_case , snake_case , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , snake_case , snake_case )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(snake_case , args.output_dir )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=snake_case , type=snake_case , required=snake_case , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=snake_case , type=snake_case , required=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=snake_case , type=snake_case , required=snake_case , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=snake_case , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=snake_case , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=snake_case , type=snake_case , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=snake_case , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=snake_case , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=snake_case , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=snake_case , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=snake_case , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=snake_case , help="Batch size." )
parser.add_argument("--seed" , type=snake_case , default=42 )
parser.add_argument("--local_rank" , type=snake_case , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=snake_case , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=snake_case , default="" , help="Can be used for distant debugging." )
UpperCAmelCase__ : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase__ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
UpperCAmelCase__ : Dict = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase__ : List[Any] = torch.device("cuda" , args.local_rank )
UpperCAmelCase__ : Optional[int] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase__ : str = nn.parallel.DistributedDataParallel(
snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case )
elif args.n_gpu > 1:
UpperCAmelCase__ : int = nn.DataParallel(snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case )
torch.save(snake_case , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , snake_case )
# Prepare dataset
UpperCAmelCase__ : Any = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase__ : Any = (torch.from_numpy(snake_case ),)
UpperCAmelCase__ : Any = TensorDataset(*snake_case )
UpperCAmelCase__ : Dict = RandomSampler(snake_case )
UpperCAmelCase__ : List[str] = DataLoader(snake_case , sampler=snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case , snake_case , snake_case )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase__ : int = mask_heads(snake_case , snake_case , snake_case )
prune_heads(snake_case , snake_case , snake_case , snake_case )
if __name__ == "__main__":
main()
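# Shape sketch for the head masks manipulated above: one scalar gate per
# (layer, head); zeroing an entry silences that attention head in the forward pass.
if __name__ == "__main__":
    demo_mask = torch.ones(12, 12)  # e.g. GPT-2 small: 12 layers x 12 heads
    demo_mask[0, 3] = 0.0           # mask head 3 of layer 0
    print(int(demo_mask.sum().item()))  # 143 heads remain active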
| 361 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 298 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = 1
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : str = (32, 32)
lowerCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,)
return model
@property
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
return model
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_06 ,)
return RobertaSeriesModelWithTransformation(__lowerCamelCase )
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
def extract(*__lowerCamelCase ,**__lowerCamelCase ):
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : int = torch.ones([0] )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : str = self.dummy_cond_unet
lowerCAmelCase__ : Any = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.dummy_vae
lowerCAmelCase__ : Optional[int] = self.dummy_text_encoder
lowerCAmelCase__ : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ : List[str] = 77
lowerCAmelCase__ : Dict = self.dummy_image.to(__lowerCamelCase )
lowerCAmelCase__ : Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase ,scheduler=__lowerCamelCase ,vae=__lowerCamelCase ,text_encoder=__lowerCamelCase ,tokenizer=__lowerCamelCase ,safety_checker=__lowerCamelCase ,feature_extractor=self.dummy_extractor ,)
lowerCAmelCase__ : Any = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__lowerCamelCase )
lowerCAmelCase__ : str = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : Any = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : str = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowerCAmelCase__ : Tuple = alt_pipe(
[prompt] ,generator=__lowerCamelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=__lowerCamelCase ,)
lowerCAmelCase__ : Any = output.images
lowerCAmelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
lowerCAmelCase__ : str = alt_pipe(
[prompt] ,generator=__lowerCamelCase ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type='''np''' ,image=__lowerCamelCase ,return_dict=__lowerCamelCase ,)[0]
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : List[Any] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : str = self.dummy_cond_unet
lowerCAmelCase__ : str = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.dummy_vae
lowerCAmelCase__ : Optional[int] = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ : List[str] = 77
lowerCAmelCase__ : Union[str, Any] = self.dummy_image.to(__lowerCamelCase )
# put models in fp16
lowerCAmelCase__ : Optional[Any] = unet.half()
lowerCAmelCase__ : Tuple = vae.half()
lowerCAmelCase__ : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : List[Any] = AltDiffusionImgaImgPipeline(
unet=__lowerCamelCase ,scheduler=__lowerCamelCase ,vae=__lowerCamelCase ,text_encoder=__lowerCamelCase ,tokenizer=__lowerCamelCase ,safety_checker=__lowerCamelCase ,feature_extractor=self.dummy_extractor ,)
lowerCAmelCase__ : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=__lowerCamelCase )
lowerCAmelCase__ : Dict = alt_pipe.to(__lowerCamelCase )
alt_pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
lowerCAmelCase__ : int = alt_pipe(
[prompt] ,generator=__lowerCamelCase ,num_inference_steps=2 ,output_type='''np''' ,image=__lowerCamelCase ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' ,'''This test requires a GPU''' )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase__ : List[Any] = init_image.resize((7_60, 5_04) )
lowerCAmelCase__ : Optional[Any] = '''BAAI/AltDiffusion'''
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase ,safety_checker=__lowerCamelCase ,)
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[Any] = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase__ : int = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = pipe(
prompt=__lowerCamelCase ,image=__lowerCamelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=__lowerCamelCase ,output_type='''np''' ,)
lowerCAmelCase__ : List[str] = output.images[0]
lowerCAmelCase__ : Optional[Any] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
lowerCAmelCase__ : Optional[int] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase__ : Optional[Any] = init_image.resize((7_68, 5_12) )
lowerCAmelCase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCAmelCase__ : Optional[int] = '''BAAI/AltDiffusion'''
lowerCAmelCase__ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
__lowerCamelCase ,safety_checker=__lowerCamelCase ,)
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : Optional[Any] = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ : Any = pipe(
prompt=__lowerCamelCase ,image=__lowerCamelCase ,strength=0.75 ,guidance_scale=7.5 ,generator=__lowerCamelCase ,output_type='''np''' ,)
lowerCAmelCase__ : Optional[int] = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
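# The slice-comparison pattern used throughout these tests, in isolation: compare a
# small corner of the generated image against reference values with a tolerance.
def _slice_matches(image, expected_slice, atol=5e-3):
    image_slice = image[0, -3:, -3:, -1]
    return bool(np.abs(image_slice.flatten() - expected_slice).max() < atol)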
| 129 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : Dict=None):
'''simple docstring'''
lowerCAmelCase__ : Dict = {}
if isinstance(lowerCamelCase_ ,lowerCamelCase_):
lowerCAmelCase__ : Any = model.mobilenet_va
else:
lowerCAmelCase__ : Tuple = model
lowerCAmelCase__ : Union[str, Any] = '''MobilenetV1/Conv2d_0/'''
lowerCAmelCase__ : Tuple = backbone.conv_stem.convolution.weight
lowerCAmelCase__ : int = backbone.conv_stem.normalization.bias
lowerCAmelCase__ : Optional[int] = backbone.conv_stem.normalization.weight
lowerCAmelCase__ : str = backbone.conv_stem.normalization.running_mean
lowerCAmelCase__ : str = backbone.conv_stem.normalization.running_var
for i in range(13):
lowerCAmelCase__ : Tuple = i + 1
lowerCAmelCase__ : Any = i * 2
lowerCAmelCase__ : Any = backbone.layer[pt_index]
lowerCAmelCase__ : List[str] = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
lowerCAmelCase__ : Optional[int] = pointer.convolution.weight
lowerCAmelCase__ : Optional[int] = pointer.normalization.bias
lowerCAmelCase__ : Union[str, Any] = pointer.normalization.weight
lowerCAmelCase__ : List[Any] = pointer.normalization.running_mean
lowerCAmelCase__ : List[Any] = pointer.normalization.running_var
lowerCAmelCase__ : Dict = backbone.layer[pt_index + 1]
lowerCAmelCase__ : Optional[Any] = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
lowerCAmelCase__ : Tuple = pointer.convolution.weight
lowerCAmelCase__ : int = pointer.normalization.bias
lowerCAmelCase__ : Union[str, Any] = pointer.normalization.weight
lowerCAmelCase__ : Any = pointer.normalization.running_mean
lowerCAmelCase__ : str = pointer.normalization.running_var
if isinstance(lowerCamelCase_ ,lowerCamelCase_):
lowerCAmelCase__ : str = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
lowerCAmelCase__ : Tuple = model.classifier.weight
lowerCAmelCase__ : Dict = model.classifier.bias
return tf_to_pt_map
def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : int ,lowerCamelCase_ : int):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading TensorFlow models in PyTorch requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''')
raise
# Load weights from TF model
lowerCAmelCase__ : str = tf.train.list_variables(lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = {}
for name, shape in init_vars:
logger.info(f"""Loading TF weight {name} with shape {shape}""")
lowerCAmelCase__ : Dict = tf.train.load_variable(lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ : List[str] = array
# Build TF to PyTorch weights loading map
lowerCAmelCase__ : List[Any] = _build_tf_to_pytorch_map(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
for name, pointer in tf_to_pt_map.items():
logger.info(f"""Importing {name}""")
if name not in tf_weights:
logger.info(f"""{name} not in tf pre-trained weights, skipping""")
continue
lowerCAmelCase__ : Any = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''')
lowerCAmelCase__ : Optional[Any] = np.transpose(lowerCamelCase_ ,(2, 3, 0, 1))
elif "weights" in name:
logger.info('''Transposing''')
if len(pointer.shape) == 2: # copying into linear layer
lowerCAmelCase__ : List[str] = array.squeeze().transpose()
else:
lowerCAmelCase__ : Tuple = np.transpose(lowerCamelCase_ ,(3, 2, 0, 1))
if pointer.shape != array.shape:
raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""")
logger.info(f"""Initialize PyTorch weight {name} {array.shape}""")
lowerCAmelCase__ : str = torch.from_numpy(lowerCamelCase_)
tf_weights.pop(lowerCamelCase_ ,lowerCamelCase_)
tf_weights.pop(name + '''/RMSProp''' ,lowerCamelCase_)
tf_weights.pop(name + '''/RMSProp_1''' ,lowerCamelCase_)
tf_weights.pop(name + '''/ExponentialMovingAverage''' ,lowerCamelCase_)
logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}""")
return model
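# Kernel layout conversion used above: TF stores regular conv kernels as
# (H, W, in, out) while PyTorch expects (out, in, H, W); depthwise kernels are
# (H, W, channels, multiplier) and map via (2, 3, 0, 1). Standalone check of
# the two transposes (shapes here are illustrative):
import numpy as np
tf_kernel = np.zeros((3, 3, 16, 32))
assert np.transpose(tf_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)
tf_depthwise = np.zeros((3, 3, 16, 1))
assert np.transpose(tf_depthwise, (2, 3, 0, 1)).shape == (16, 1, 3, 3)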
def lowerCAmelCase__ ( lowerCamelCase_ : torch.Tensor ,lowerCamelCase_ : nn.Convad):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = features.shape[-2:]
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = conv_layer.stride
lowerCAmelCase__ , lowerCAmelCase__ : Dict = conv_layer.kernel_size
if in_height % stride_height == 0:
lowerCAmelCase__ : Dict = max(kernel_height - stride_height ,0)
else:
lowerCAmelCase__ : List[str] = max(kernel_height - (in_height % stride_height) ,0)
if in_width % stride_width == 0:
lowerCAmelCase__ : List[Any] = max(kernel_width - stride_width ,0)
else:
lowerCAmelCase__ : Any = max(kernel_width - (in_width % stride_width) ,0)
lowerCAmelCase__ : Union[str, Any] = pad_along_width // 2
lowerCAmelCase__ : Optional[Any] = pad_along_width - pad_left
lowerCAmelCase__ : List[Any] = pad_along_height // 2
lowerCAmelCase__ : int = pad_along_height - pad_top
lowerCAmelCase__ : str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCamelCase_ ,lowerCamelCase_ ,'''constant''' ,0.0)
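# The function above reproduces TensorFlow's "SAME" padding, so the output size
# is ceil(input / stride) regardless of kernel size. Worked sketch for
# input 7, kernel 3, stride 2: pad_along = max(3 - (7 % 2), 0) = 2, split as
# (top, bottom) = (1, 1):
import torch
from torch import nn
conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
x = torch.randn(1, 3, 7, 7)
y = conv(nn.functional.pad(x, (1, 1, 1, 1), "constant", 0.0))
assert y.shape == (1, 8, 4, 4)  # ceil(7 / 2) == 4 in both spatial dimensions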
class lowerCamelCase__ ( nn.Module):
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = 1 ,__lowerCamelCase = 1 ,__lowerCamelCase = False ,__lowerCamelCase = True ,__lowerCamelCase = True ,) -> None:
"""simple docstring"""
super().__init__()
lowerCAmelCase__ : str = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
lowerCAmelCase__ : List[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowerCAmelCase__ : Optional[int] = nn.Convad(
in_channels=__lowerCamelCase ,out_channels=__lowerCamelCase ,kernel_size=__lowerCamelCase ,stride=__lowerCamelCase ,padding=__lowerCamelCase ,groups=__lowerCamelCase ,bias=__lowerCamelCase ,padding_mode='''zeros''' ,)
if use_normalization:
lowerCAmelCase__ : Optional[int] = nn.BatchNormad(
num_features=__lowerCamelCase ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=__lowerCamelCase ,track_running_stats=__lowerCamelCase ,)
else:
lowerCAmelCase__ : Dict = None
if use_activation:
if isinstance(__lowerCamelCase ,__lowerCamelCase ):
lowerCAmelCase__ : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act ,__lowerCamelCase ):
lowerCAmelCase__ : Any = ACTaFN[config.hidden_act]
else:
lowerCAmelCase__ : List[str] = config.hidden_act
else:
lowerCAmelCase__ : int = None
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> torch.Tensor:
"""simple docstring"""
if self.config.tf_padding:
lowerCAmelCase__ : str = apply_tf_padding(__lowerCamelCase ,self.convolution )
lowerCAmelCase__ : Tuple = self.convolution(__lowerCamelCase )
if self.normalization is not None:
lowerCAmelCase__ : Tuple = self.normalization(__lowerCamelCase )
if self.activation is not None:
lowerCAmelCase__ : Union[str, Any] = self.activation(__lowerCamelCase )
return features
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =MobileNetVaConfig
snake_case_ =load_tf_weights_in_mobilenet_va
snake_case_ ="""mobilenet_v1"""
snake_case_ ="""pixel_values"""
snake_case_ =False
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> None:
"""simple docstring"""
if isinstance(__lowerCamelCase ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__lowerCamelCase ,nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__snake_case : Optional[int] =R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__snake_case : List[Any] =R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , lowerCamelCase__ , )
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase = True ) -> int:
"""simple docstring"""
super().__init__(__lowerCamelCase )
lowerCAmelCase__ : Dict = config
lowerCAmelCase__ : Dict = 32
lowerCAmelCase__ : List[str] = max(int(depth * config.depth_multiplier ) ,config.min_depth )
lowerCAmelCase__ : Optional[int] = MobileNetVaConvLayer(
__lowerCamelCase ,in_channels=config.num_channels ,out_channels=__lowerCamelCase ,kernel_size=3 ,stride=2 ,)
lowerCAmelCase__ : Optional[Any] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
lowerCAmelCase__ : Union[str, Any] = nn.ModuleList()
for i in range(13 ):
lowerCAmelCase__ : Tuple = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowerCAmelCase__ : Optional[int] = max(int(depth * config.depth_multiplier ) ,config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase ,in_channels=__lowerCamelCase ,out_channels=__lowerCamelCase ,kernel_size=3 ,stride=strides[i] ,groups=__lowerCamelCase ,) )
self.layer.append(
MobileNetVaConvLayer(
__lowerCamelCase ,in_channels=__lowerCamelCase ,out_channels=__lowerCamelCase ,kernel_size=1 ,) )
lowerCAmelCase__ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def lowerCAmelCase__ (self ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowerCAmelCase__ : Optional[Any] = self.conv_stem(__lowerCamelCase )
lowerCAmelCase__ : Any = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowerCAmelCase__ : int = layer_module(__lowerCamelCase )
if output_hidden_states:
lowerCAmelCase__ : Optional[Any] = all_hidden_states + (hidden_states,)
lowerCAmelCase__ : Any = hidden_states
if self.pooler is not None:
lowerCAmelCase__ : str = torch.flatten(self.pooler(__lowerCamelCase ) ,start_dim=1 )
else:
lowerCAmelCase__ : str = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=__lowerCamelCase ,)
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , lowerCamelCase__ , )
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
def __init__(self ,__lowerCamelCase ) -> None:
"""simple docstring"""
super().__init__(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = config.num_labels
lowerCAmelCase__ : Dict = MobileNetVaModel(__lowerCamelCase )
lowerCAmelCase__ : Dict = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowerCAmelCase__ : str = nn.Dropout(config.classifier_dropout_prob ,inplace=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = nn.Linear(__lowerCamelCase ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def lowerCAmelCase__ (self ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ : Union[str, Any] = self.mobilenet_va(__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase )
lowerCAmelCase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ : Optional[int] = self.classifier(self.dropout(__lowerCamelCase ) )
lowerCAmelCase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ : Optional[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ : int = '''single_label_classification'''
else:
lowerCAmelCase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase__ : Dict = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ : Optional[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowerCAmelCase__ : Tuple = loss_fct(__lowerCamelCase ,__lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ : int = CrossEntropyLoss()
lowerCAmelCase__ : Union[str, Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ : Optional[int] = BCEWithLogitsLoss()
lowerCAmelCase__ : List[Any] = loss_fct(__lowerCamelCase ,__lowerCamelCase )
if not return_dict:
lowerCAmelCase__ : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states ,)
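# The loss dispatch above is the standard Transformers pattern: infer
# config.problem_type from num_labels and the label dtype, then pick MSE,
# cross-entropy, or BCE-with-logits accordingly. Standalone restatement
# (pick_loss and the sample tensors are my own illustration):
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
def pick_loss(problem_type):
    return {
        "regression": MSELoss(),
        "single_label_classification": CrossEntropyLoss(),
        "multi_label_classification": BCEWithLogitsLoss(),
    }[problem_type]
logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
loss = pick_loss("single_label_classification")(logits.view(-1, 10), labels.view(-1))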
| 129 | 1 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    while second != 0:
        carry = first & second  # bits that would overflow in this column
        first ^= second  # addition without the carry
        second = carry << 1  # shift the carry into the next column
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = int(input("Enter the first number: ").strip())
__A = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
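# Quick trace of the carry-propagation loop above: add(3, 5) steps through
# (carry, first, second) = (1, 6, 2) -> (2, 4, 4) -> (4, 0, 8) -> (0, 8, 0).
assert add(3, 5) == 8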
| 2 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
metadata={"help": "The output directory where the model will be written."} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: Dict = HfArgumentParser((ModelArguments,) )
((lowercase__), ): List[str] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowercase__: Tuple = True
lowercase__: int = True
lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowercase__: int = decoder_config.decoder_start_token_id
lowercase__: Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowercase__: Tuple = decoder_config.bos_token_id
if pad_token_id is None:
lowercase__: Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowercase__: Optional[Any] = decoder_config.eos_token_id
lowercase__: Tuple = decoder_start_token_id
lowercase__: Dict = pad_token_id
lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
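# The dataclass/field(metadata={"help": ...}) pattern above is what
# HfArgumentParser turns into a command-line interface. Minimal sketch of the
# same mechanism (reads sys.argv, so it expects a --output_dir argument):
from dataclasses import dataclass, field
from transformers import HfArgumentParser
@dataclass
class Args:
    output_dir: str = field(metadata={"help": "Where to write the model."})
(args,) = HfArgumentParser((Args,)).parse_args_into_dataclasses()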
| 2 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : str = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
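# _LazyModule defers the heavy submodule imports above until an attribute is
# first touched. Rough sketch of the mechanism (simplified; the real class
# also handles dir(), pickling, and module specs):
import importlib, types
class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)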
| 236 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_0_9
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set)
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector,
            atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
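# The hypothesis is linear, h(x) = p[0] + p[1]*x1 + p[2]*x2 + p[3]*x3, and each
# sweep applies p[i] -= LEARNING_RATE * mean(error * x_i). One update step,
# restated with vectorised numpy (illustrative, not part of the script):
import numpy as np
def step(theta, X, y, lr=0.009):
    errors = X @ theta[1:] + theta[0] - y  # h(x) - y for every example
    grad = np.concatenate(([errors.mean()], X.T @ errors / len(y)))
    return theta - lr * grad
theta = step(np.array([2.0, 4.0, 1.0, 5.0]), np.array([[5.0, 2.0, 3.0]]), np.array([15.0]))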
| 236 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : Any = StableDiffusionControlNetImgaImgPipeline
a : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : int = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
a : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
torch.manual_seed(0 )
UpperCamelCase__ : str = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
torch.manual_seed(0 )
UpperCamelCase__ : Dict = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__magic_name__, set_alpha_to_one=__magic_name__, )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
UpperCamelCase__ : Dict = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
UpperCamelCase__ : Optional[int] = CLIPTextModel(__magic_name__ )
UpperCamelCase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ : Union[str, Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> Optional[Any]:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCamelCase__ : Optional[int] = torch.manual_seed(__magic_name__ )
else:
UpperCamelCase__ : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase__ : Dict = 2
UpperCamelCase__ : List[Any] = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), )
UpperCamelCase__ : Optional[Any] = floats_tensor(control_image.shape, rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : Any = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCamelCase__ : str = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : str = StableDiffusionControlNetImgaImgPipeline
a : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
a : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a : Any = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
torch.manual_seed(0 )
def init_weights(__magic_name__ ):
    if isinstance(__magic_name__, torch.nn.Convad ):
        torch.nn.init.normal(__magic_name__.weight )
        __magic_name__.bias.data.fill_(1.0 )
UpperCamelCase__ : Any = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
UpperCamelCase__ : List[str] = ControlNetModel(
block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
controlneta.controlnet_down_blocks.apply(__magic_name__ )
torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__magic_name__, set_alpha_to_one=__magic_name__, )
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
UpperCamelCase__ : str = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
UpperCamelCase__ : str = CLIPTextModel(__magic_name__ )
UpperCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCamelCase__ : Tuple = MultiControlNetModel([controlneta, controlneta] )
UpperCamelCase__ : Optional[int] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase__ ( self, __magic_name__, __magic_name__=0 ) -> List[Any]:
"""simple docstring"""
if str(__magic_name__ ).startswith('''mps''' ):
UpperCamelCase__ : Dict = torch.manual_seed(__magic_name__ )
else:
UpperCamelCase__ : Dict = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
UpperCamelCase__ : List[Any] = 2
UpperCamelCase__ : Optional[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=__magic_name__, device=torch.device(__magic_name__ ), ),
]
UpperCamelCase__ : List[Any] = floats_tensor(control_image[0].shape, rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
UpperCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
UpperCamelCase__ : Optional[int] = Image.fromarray(np.uinta(__magic_name__ ) ).convert('''RGB''' ).resize((64, 64) )
UpperCamelCase__ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Any = self.get_dummy_components()
UpperCamelCase__ : str = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
UpperCamelCase__ : Optional[Any] = 10.0
UpperCamelCase__ : Tuple = 4
UpperCamelCase__ : List[Any] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : List[Any] = steps
UpperCamelCase__ : Optional[Any] = scale
UpperCamelCase__ : Dict = pipe(**__magic_name__ )[0]
UpperCamelCase__ : List[str] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : List[str] = steps
UpperCamelCase__ : Optional[int] = scale
UpperCamelCase__ : str = pipe(**__magic_name__, control_guidance_start=0.1, control_guidance_end=0.2 )[0]
UpperCamelCase__ : List[Any] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : Optional[Any] = steps
UpperCamelCase__ : Optional[int] = scale
UpperCamelCase__ : Optional[int] = pipe(**__magic_name__, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7] )[0]
UpperCamelCase__ : List[Any] = self.get_dummy_inputs(__magic_name__ )
UpperCamelCase__ : List[Any] = steps
UpperCamelCase__ : Optional[int] = scale
UpperCamelCase__ : Tuple = pipe(**__magic_name__, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : str = self.get_dummy_components()
UpperCamelCase__ : Dict = self.pipeline_class(**__magic_name__ )
pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__magic_name__ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
UpperCamelCase__ : List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', safety_checker=__magic_name__, controlnet=__magic_name__ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__magic_name__ )
UpperCamelCase__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ : int = '''evil space-punk bird'''
UpperCamelCase__ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
UpperCamelCase__ : Any = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
UpperCamelCase__ : Dict = pipe(
__magic_name__, __magic_name__, control_image=__magic_name__, generator=__magic_name__, output_type='''np''', num_inference_steps=50, strength=0.6, )
UpperCamelCase__ : Any = output.images[0]
assert image.shape == (512, 512, 3)
UpperCamelCase__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9E-2
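# control_guidance_start / control_guidance_end (exercised in the dummy tests
# above) window the ControlNet conditioning to a fraction of the denoising
# schedule. Simplified sketch of the per-step decision, my own restatement;
# the pipeline's real bookkeeping tracks a keep-fraction per controlnet:
def controlnet_active(step, total_steps, start, end):
    progress = step / total_steps
    return start <= progress < end
assert controlnet_active(5, 50, 0.1, 0.2)
assert not controlnet_active(30, 50, 0.1, 0.2)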
| 367 |
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
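# Hexagonal numbers follow h(n) = n * (2n - 1): 0, 1, 6, 15, 28, 45, ...
assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]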
| 247 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}
def digits_fifth_powers_sum(number: int) -> int:
    '''simple docstring'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))
def solution() -> int:
    '''simple docstring'''
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
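# Worked check: 4150 is one of the fixed points the search above collects,
# since 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150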
| 343 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
assert hasattr(self , """env""" )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = {
"""enabled""": True,
"""processes_per_host""": 8,
}
UpperCamelCase = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = self.create_estimator(lowerCamelCase_ )
# run training
estimator.fit()
# result dataframe
UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
| 343 | 1 |
def solution(n: int = 4_0_0_0_0_0_0) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
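# Sanity check against the Fibonacci sequence 1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
# 89: the even terms up to 100 are 2, 8, and 34, which sum to 44.
assert solution(100) == 44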
| 20 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : Optional[int] = "microsoft/speecht5_tts"
A : List[Any] = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
A : str = "text_reader"
A : Optional[Any] = SpeechTaProcessor
A : Any = SpeechTaForTextToSpeech
A : Optional[Any] = SpeechTaHifiGan
A : str = ["text"]
A : Union[str, Any] = ["audio"]
def snake_case__ ( self : List[Any] ):
if self.post_processor is None:
__snake_case : Tuple = """microsoft/speecht5_hifigan"""
super().setup()
def snake_case__ ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple=None ):
__snake_case : str = self.pre_processor(text=_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
__snake_case : List[Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
__snake_case : str = torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Dict ):
with torch.no_grad():
return self.model.generate_speech(**_lowerCAmelCase )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : int ):
with torch.no_grad():
return self.post_processor(_lowerCAmelCase ).cpu().detach()
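# The speaker embedding above is an x-vector pulled from row 7305 of the CMU
# Arctic dataset; SpeechT5 expects a 512-dim vector. Standalone fetch
# (requires `datasets` and network access, so illustrative only):
from datasets import load_dataset
import torch
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[73_05]["xvector"]).unsqueeze(0)
assert speaker_embeddings.shape == (1, 512)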
| 20 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowerCamelCase : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_lowerCamelCase : int = concatenate_datasets
_lowerCamelCase : List[Any] = DownloadConfig
_lowerCamelCase : Optional[int] = DownloadManager
_lowerCamelCase : Dict = DownloadMode
_lowerCamelCase : Optional[Any] = DownloadConfig
_lowerCamelCase : Optional[int] = DownloadMode
_lowerCamelCase : Optional[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
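# The guards above rely on packaging's version parsing rather than plain
# string comparison. Quick illustration of why that matters:
from packaging import version
assert version.parse("3.10") > version.parse("3.7")  # "3.10" < "3.7" as strings
assert version.parse("8.0.0").major >= 8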
| 336 |
import argparse
from collections import defaultdict
import yaml
__a = 'docs/source/en/_toctree.yml'
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = defaultdict(snake_case__ )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase_ = [key for key, value in counts.items() if value > 1]
lowercase_ = []
for duplicate_key in duplicates:
lowercase_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(snake_case__ , key=lambda snake_case__ : s["title"].lower() )
def a ( snake_case__: List[Any]=False ):
'''simple docstring'''
with open(snake_case__ , encoding='''utf-8''' ) as f:
lowercase_ = yaml.safe_load(f.read() )
# Get to the API doc
lowercase_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase_ = content[api_idx]['''sections''']
# Then to the model doc
lowercase_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase_ = api_doc[model_idx]['''sections''']
lowercase_ = [(idx, section) for idx, section in enumerate(snake_case__ ) if '''sections''' in section]
lowercase_ = False
for idx, modality_doc in modalities_docs:
lowercase_ = modality_doc['''sections''']
lowercase_ = clean_model_doc_toc(snake_case__ )
if old_modality_doc != new_modality_doc:
lowercase_ = True
if overwrite:
lowercase_ = new_modality_doc
if diff:
if overwrite:
lowercase_ = model_doc
lowercase_ = api_doc
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__a = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
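# Core of clean_model_doc_toc: count each "local" key, flag keys seen more
# than once, and keep a single entry per duplicate. Standalone check:
from collections import defaultdict
docs = [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "Bert"}]
counts = defaultdict(int)
for doc in docs:
    counts[doc["local"]] += 1
assert [key for key, value in counts.items() if value > 1] == ["bert"]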
| 30 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['BeitFeatureExtractor']
lowerCamelCase_ = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
_A = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = "sgugger/tiny-distilbert-classification"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "patrickvonplaten/t5-tiny-random"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , configs=[config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : Any ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
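# Added usage sketch (not part of the original test file): driving the benchmark
# directly. Assumes the TensorFlow benchmark utilities shipped with `transformers`
# (deprecated in recent releases); the tiny model id mirrors the tests above.
#
# from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#
# benchmark_args = TensorFlowBenchmarkArguments(
#     models=["sshleifer/tiny-gpt2"],
#     inference=True,
#     training=False,
#     sequence_lengths=[8],
#     batch_sizes=[1],
#     multi_process=False,
# )
# results = TensorFlowBenchmark(benchmark_args).run()
# print(results.time_inference_result)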
| 174 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
A : Tuple = get_logger(__name__)
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ):
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with FSDP.state_dict_type(
__UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=__UpperCamelCase , storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__UpperCamelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
SCREAMING_SNAKE_CASE_ = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Loading model from {input_model_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__UpperCamelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=__UpperCamelCase , storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE_ = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(__UpperCamelCase )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ):
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with FSDP.state_dict_type(
__UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(__UpperCamelCase , __UpperCamelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(__UpperCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
# below check should work but currently it isn't working (mostly a PyTorch issue);
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
SCREAMING_SNAKE_CASE_ = os.path.join(__UpperCamelCase , __UpperCamelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__UpperCamelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(__UpperCamelCase ) , )
SCREAMING_SNAKE_CASE_ = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict_to_load(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
optimizer.load_state_dict(__UpperCamelCase )
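# Added usage sketch: in upstream `accelerate` the four helpers above are named
# save_fsdp_model / load_fsdp_model / save_fsdp_optimizer / load_fsdp_optimizer
# (the obfuscated `a__` definitions here shadow one another); the sketch assumes
# those names and an already prepared FSDP training setup.
#
# from accelerate import Accelerator
#
# accelerator = Accelerator()  # launched with an FSDP config
# model, optimizer = accelerator.prepare(model, optimizer)
# fsdp_plugin = accelerator.state.fsdp_plugin
# save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt/step_100")
# save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt/step_100")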
| 118 |
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into one processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
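# Added usage sketch: pairing video frames with a raw waveform. The checkpoint id
# and the input shapes are illustrative assumptions.
#
# import numpy as np
# from transformers import TvltProcessor
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# frames = list(np.random.randint(0, 255, (8, 3, 224, 224)))  # 8 RGB frames
# waveform = list(np.random.randn(10_000))                    # mono audio samples
# inputs = processor(images=frames, audio=waveform, sampling_rate=44_100, return_tensors="pt")
# print(sorted(inputs.keys()))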
| 118 | 1 |
"""Backtracking search for every subset of `nums` whose elements sum to `max_sum`."""
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune when the running sum overshoots, or when even taking every remaining
    # number could no longer reach max_sum
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
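# Added sanity check: with nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the search
# above finds exactly the subsets [3, 4, 2] and [4, 5], in DFS order.
assert result == [[3, 4, 2], [4, 5]]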
| 214 |
"""Fast tokenizer class for BLOOM."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            # swap in a pre-tokenizer whose add_prefix_space setting matches the request
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate the encoded conversation turns, truncating to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
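# Added usage sketch: the fast tokenizer end to end. Requires hub access; the
# checkpoint id is one of the real bigscience entries mapped above.
#
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# enc = tok("Hello world", return_tensors="pt")
# print(enc.input_ids.shape)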
| 214 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
__a : str = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__a : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__a : Dict = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : Tuple = Image.fromarray(np.uint8(__a ) ).convert('RGB' ).resize((64, 64) )
__a : Tuple = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(__a ).startswith('mps' ):
__a : Any = torch.manual_seed(__a )
else:
__a : str = torch.Generator(device=__a ).manual_seed(__a )
__a : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a )
__a : List[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : List[Any] = self.get_dummy_inputs(__a )
__a : Dict = sd_pipe(**__a ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
__a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
__a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : int = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
__a , torch_dtype=torch.float16 , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : int = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : str = StableDiffusionInpaintPipeline.from_pretrained(
__a , safety_checker=__a , scheduler=__a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : str = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , )
__a : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
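# Added end-user sketch of the flow these tests exercise; image paths are placeholders.
#
# import torch
# from diffusers import StableDiffusionInpaintPipeline
# from diffusers.utils import load_image
#
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
# ).to("cuda")
# init_image = load_image("init_image.png")  # placeholder path
# mask_image = load_image("mask.png")        # placeholder path
# result = pipe(prompt="Face of a yellow cat", image=init_image, mask_image=mask_image)
# result.images[0].save("inpainted.png")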
| 27 | """simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) -> str:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ) -> int:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ) -> Any:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : int ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> str:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ) -> List[str]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] ) -> List[str]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : int , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Dict ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Tuple , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ) -> Tuple:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : str ) -> Tuple:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : List[str] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ) -> int:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : str ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ) -> Any:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Union[str, Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Dict ) -> Optional[Any]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : str , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ) -> Union[str, Any]:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Optional[Any] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ) -> List[str]:
requires_backends(cls , ['''flax'''] )
class UpperCAmelCase_ ( metaclass=_lowercase):
snake_case__ = ['''flax''']
def __init__( self : Dict , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) -> Optional[Any]:
requires_backends(self , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ) -> Dict:
requires_backends(cls , ['''flax'''] )
@classmethod
def _UpperCamelCase ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''flax'''] )
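# Added mechanism note: each placeholder class calls requires_backends as soon as it
# is constructed (or built via a classmethod), so importing the library without
# `flax` installed succeeds while any actual use fails with an informative
# ImportError. A minimal hedged sketch of the same pattern (utils layout assumed):
#
# from ..utils import DummyObject, requires_backends
#
# class SomeFlaxPipeline(metaclass=DummyObject):
#     _backends = ["flax"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["flax"])  # raises ImportError when flax is absent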
| 256 | 0 |
"""Bisection method: find a root of a continuous function on a sign-changing interval."""


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # the interval must bracket a root, i.e. the function changes sign on [a, b]
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
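# Added sanity check: the positive root of 10 - x**2 is sqrt(10) ≈ 3.16228, so both
# calls above converge to within the final interval width (0.01) of that value.
if __name__ == "__main__":
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01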
| 25 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for NAT (Neighborhood Attention Transformer) models."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
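# Added usage sketch: building the config and reading the derived hidden size.
# Instantiating an actual NatModel additionally requires the `natten` package.
#
# from transformers import NatConfig
#
# config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
# print(config.hidden_size)  # 64 * 2 ** 3 == 512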
| 25 | 1 |
import argparse

from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Copy the weights of a T5X checkpoint into a freshly initialised Flax seq2seq model."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowerCamelCase = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"""
""" attribute with a value from [\'local\', \'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
layer_name = f'layers_{str(layer_index)}'
# Self-Attention
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
lowerCamelCase = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
lowerCamelCase = flax_model.params["""encoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""]
lowerCamelCase = tax_attention_key
lowerCamelCase = tax_attention_out
lowerCamelCase = tax_attention_query
lowerCamelCase = tax_attention_value
lowerCamelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase = tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase = tax_mlp_wi_a
lowerCamelCase = tax_mlp_wi_a
else:
lowerCamelCase = tax_mlp_wi
lowerCamelCase = tax_mlp_wo
lowerCamelCase = tax_mlp_layer_norm
lowerCamelCase = flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase = tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
lowerCamelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
layer_name = f'layers_{str(layer_index)}'
# Self-Attention
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
lowerCamelCase = tax_enc_dec_attention_module["""key"""]["""kernel"""]
lowerCamelCase = tax_enc_dec_attention_module["""out"""]["""kernel"""]
lowerCamelCase = tax_enc_dec_attention_module["""query"""]["""kernel"""]
lowerCamelCase = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
lowerCamelCase = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
lowerCamelCase = flax_model.params["""decoder"""]["""block"""][str(lowerCamelCase__ )]["""layer"""]
lowerCamelCase = tax_attention_key
lowerCamelCase = tax_attention_out
lowerCamelCase = tax_attention_query
lowerCamelCase = tax_attention_value
lowerCamelCase = tax_pre_attention_layer_norm
lowerCamelCase = tax_enc_dec_attention_key
lowerCamelCase = tax_enc_dec_attention_out
lowerCamelCase = tax_enc_dec_attention_query
lowerCamelCase = tax_enc_dec_attention_value
lowerCamelCase = tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase = tax_mlp_wi_a
lowerCamelCase = tax_mlp_wi_a
else:
lowerCamelCase = tax_mlp_wi
lowerCamelCase = tax_mlp_wo
lowerCamelCase = txa_mlp_layer_norm
lowerCamelCase = flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
lowerCamelCase = txa_decoder_norm
# Only for layer 0:
lowerCamelCase = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
lowerCamelCase = tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
lowerCamelCase = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(flax_dump_folder_path)
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
UpperCAmelCase : int = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
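# Example invocation (added; the paths and config name are placeholders):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_dump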
| 252 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Neville's iterated interpolation: evaluate at x0 the polynomial through the points."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    # the interpolated value sits in the bottom-right corner of the tableau
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
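# Added worked check: four collinear points on y = 2x, evaluated at x0 = 5. Any
# interpolating polynomial through collinear points is the line itself, so the
# tableau's corner entry is exactly 10.0.
if __name__ == "__main__":
    value, _tableau = neville_interpolate([1, 2, 3, 4], [2, 4, 6, 8], 5)
    print(value)  # 10.0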
| 333 | 0 |
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
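# Example invocation (added; the path is a placeholder):
#   python convert_model_to_fp16.py path/to/pytorch_model.bin --save_path model_fp16.bin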
| 257 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 257 | 1 |
from math import ceil


def solution(n: int = 1_001) -> int:
    """Sum of the numbers on the diagonals of an n by n spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # the top-right corner of ring i holds odd**2
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 20 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCAmelCase__ = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """Token-classification task for CoNLL-style NER files."""

    def __init__(self, label_idx=-1):
        # column index of the label within each whitespace-separated line
        self.label_idx = label_idx
def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase , lowercase ):
A__ = mode.value
A__ = os.path.join(lowercase , F'{mode}.txt' )
A__ = 1
A__ = []
with open(lowercase , encoding="utf-8" ) as f:
A__ = []
A__ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
guid_index += 1
A__ = []
A__ = []
else:
A__ = line.split(" " )
words.append(splits[0] )
if len(lowercase ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
return examples
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
'''simple docstring'''
A__ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(lowercase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
A__ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(lowercase )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Chunking task: same file format as NER, but the label lives in column -2."""

    def __init__(self):
        super().__init__(label_idx=-2)
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    """Part-of-speech tagging task reading CoNLL-U files via `conllu.parse_incr`."""
def UpperCamelCase ( self , lowercase , lowercase ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase , lowercase ):
A__ = mode.value
A__ = os.path.join(lowercase , F'{mode}.txt' )
A__ = 1
A__ = []
with open(lowercase , encoding="utf-8" ) as f:
for sentence in parse_incr(lowercase ):
A__ = []
A__ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(lowercase ) == len(lowercase )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=lowercase , labels=lowercase ) )
guid_index += 1
return examples
def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> List[Any]:
'''simple docstring'''
A__ = 0
for sentence in parse_incr(lowercase ):
A__ = preds_list[example_id]
A__ = ""
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(lowercase )
example_id += 1
def UpperCamelCase ( self , lowercase ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
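# Added usage sketch: reading CoNLL-formatted examples. The method names follow the
# upstream transformers example (read_examples_from_file / get_labels); the data
# directory is a placeholder.
#
# task = NER(label_idx=-1)
# labels = task.get_labels(None)  # falls back to the default CoNLL label set
# examples = task.read_examples_from_file("path/to/conll_data", Split.train)
# print(len(labels), len(examples))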
| 68 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LEDTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
super().setUp()
_A: Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_A: Union[str, Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_A: Any = {'unk_token': '<unk>'}
_A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase_ ) )
def __magic_name__ ( self : Tuple , **lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , **lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A: str = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_A: Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: Dict = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_A: Any = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: Optional[Any] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , lowerCAmelCase_ )
self.assertIn('''attention_mask''' , lowerCAmelCase_ )
self.assertNotIn('''labels''' , lowerCAmelCase_ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ )
@require_torch
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: Dict = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: List[Any] = tokenizer(text_target=lowerCAmelCase_ , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
@require_torch
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: List[str] = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[str] = ['A long paragraph for summarization.']
_A: Dict = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: int = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' )
_A: Dict = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' )
_A: str = inputs['input_ids']
_A: Optional[int] = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_A: Union[str, Any] = ['Summary of the text.', 'Another summary.']
_A: List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_A: str = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
_A: List[Any] = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['input_ids']]
_A: Optional[Any] = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_A: Dict = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_A: int = 'A, <mask> AllenNLP sentence.'
_A: int = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_A: Optional[Any] = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_A: List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_A: Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 368 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'''do_clean_text''': False, '''add_prefix_space''': False}
def __magic_name__ ( self : Any ):
"""simple docstring"""
super().setUp()
# fmt: off
_A: Union[str, Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
_A: Union[str, Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
_A: str = {'''unk_token''': '''<unk>'''}
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowerCAmelCase_ ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)
    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
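        # <|bagoftoken|> appears to expand into repeated copies of the neighbouring token
        # ("、" here), which is why the expected decoded text below has quadrupled commas.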
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
@slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
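        # All three call styles below must decode to the same normalized string:
        # plain concatenation, everything passed as prefix_text, and a prefix/input split.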
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)
@slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)
@slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token
@slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 301 | 0 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # The price is rendered in a <span> inside a div with this (minified) Yahoo class name.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 36 |
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image around its mean pixel value."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 36 | 1 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 356 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized model (and its tokenizer) built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 221 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map an original YOSO checkpoint key onto the Hugging Face naming scheme."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all checkpoint keys, drop unused heads, and add the buffers the HF model expects."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        # Drop pooler / sentence-classification weights that have no counterpart in the HF model.
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
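# The argparse CLI below drives the conversion from the command line.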
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 147 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
},
'added_tokens.json': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
},
'merges_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
},
'tokenizer_file': {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'RUCAIBox/mvp': 1_024,
}
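# Fast (Rust-backed) tokenizer for MVP. It mirrors the BART/GPT-2 byte-level BPE handling,
# including the `add_prefix_space` patching of the pre-tokenizer and post-processor below.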
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 147 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
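# EsmConfig carries the standard BERT-style hyper-parameters; when `is_folding_model=True`
# it additionally nests the ESMFold trunk and structure-module configs defined below.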
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 364 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
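# Walk every pytest JSON-report log produced by the nightly matrix and tally failures per log.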
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
        date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 12 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
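# The tester above fabricates variable-length raw-audio batches; the test case below checks
# (de)serialization, padding/truncation to Whisper's fixed 30 s / 3000-frame window, and dtypes.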
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 275 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
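    # TVLT audio_values are spectrogram patches of shape
    # (batch, num_audio_channels, time, feature_size), hence the 4-D checks above.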
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 296 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""BeitFeatureExtractor"""]
UpperCAmelCase__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 | """simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
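    """
    Fast Polynomial Multiplication using radix-2 fast Fourier Transform:
    multiplies two polynomials, given as coefficient lists (lowest degree
    first), in O(n*log(n)) time instead of the naive O(n^2).
    """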
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack: real coefficients (rounded) of the product polynomial
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30 | 1 |
"""simple docstring"""
from __future__ import annotations
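# Bitonic sort only works for sequence lengths that are powers of two;
# direction 1 sorts ascending, direction 0 sorts descending.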
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2] and swap them if they are out of order."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] in the given direction via bitonic merging."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 291 | '''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
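# End-to-end tests for RealmRetriever: joint question/evidence-block encoding,
# answer-span search inside retrieved blocks, and (de)serialization of block records.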
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__a , exist_ok=__a )
UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__a , exist_ok=__a )
def _lowercase (self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def _lowercase (self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase (self : List[str] ):
UpperCAmelCase_ = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records
    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len
        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
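    # A minimal, commented-out sketch of inspecting a saved retriever directory
    # outside the test harness. It assumes (as the test above implies) that the
    # block records are stored as one object-dtype numpy array under
    # _REALM_BLOCK_RECORDS_FILENAME:
    #   records = np.load(os.path.join(save_dir, _REALM_BLOCK_RECORDS_FILENAME), allow_pickle=True)
    #   print(records[:3])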
| 1 | 0 |
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort and return the array."""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at `index` is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """Heap sort: build a max-heap, then repeatedly move the max to the end."""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int):
    """Return the median of the three indexed values, used as the pivot."""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def partition(array: list, low: int, high: int, pivot) -> int:
    """Hoare-style partition around `pivot`; returns the split index."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: quicksort with heap-sort and insertion-sort fallbacks."""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Quicksort recursion; switch to heap sort when max_depth reaches 0 and to
    insertion sort once the partition is small enough."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
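# Usage sketch:
#   sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
# With only 16 elements the call never leaves the insertion-sort fast path,
# since the size threshold above is 16.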
| 358 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Pop `old` from the state dict and re-insert its value under `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Move backbone weights under the HF `backbone.conv_encoder.model` prefix."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
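# Toy illustration of the two helpers above (hypothetical state dict):
#   toy = {"backbone.0.body.conv1.weight": 1, "query_embed.weight": 2}
#   rename_key(toy, "query_embed.weight", "query_position_embeddings.weight")
#   toy = rename_backbone_keys(toy)
#   # -> {"backbone.conv_encoder.model.conv1.weight": 1,
#   #     "query_position_embeddings.weight": 2}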
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused attention in_proj matrix into separate q/k/v projections."""
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
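# Sanity check of the slicing above (hidden size 256, so the fused matrix is
# 768 x 256); a quick toy verification, not part of the conversion itself:
#   import torch
#   fused = torch.randn(3 * 256, 256)
#   q, k, v = fused[:256, :], fused[256:512, :], fused[-256:, :]
#   assert torch.equal(torch.cat([q, k, v]), fused)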
def prepare_img():
    """Download the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original Conditional DETR weights into HF format."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # reconstructed target key: nest base-model weights under
                # "conditional_detr.model" (the original assignment was lost)
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 175 | 0 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` through the HF model and copy `value` into the right tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """Return True if `name` matches any (possibly wildcarded) ignore pattern."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
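# Quick illustration of the wildcard handling above (hypothetical inputs):
#   should_ignore("encoder.layers.0.conv.bias", ["encoder.layers.*"])          -> True
#   should_ignore("quantizer.vq.layers.3._codebook.embed", ["vq.*._codebook"]) -> True
#   should_ignore("decoder.layers.0.conv.bias", ["encoder.layers.*"])          -> False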
def recursively_load_weights(orig_dict, hf_model, model_name):
    """Map every original checkpoint tensor onto the HF EncodecModel."""
    unused_weights = []
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
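# Worked example of how one wildcard MAPPING entry resolves (toy key, for
# illustration only):
#   name       = "quantizer.vq.layers.3._codebook.embed"
#   key        = "quantizer.vq.layers.*._codebook.embed" -> suffix "_codebook.embed"
#   layer idx  = name.split("_codebook.embed")[0].split(".")[-2]  -> "3"
#   mapped_key = "quantizer.layers.*.codebook.embed".replace("*", "3")
#              = "quantizer.layers.3.codebook.embed"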
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original EnCodec checkpoint into HF format."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 345 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
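    # For the tester defaults above (image_size=32, patch_size=2, embed_dim=16,
    # depths=[1, 2, 1]), create_and_check_model expects:
    #   num_patches      = (32 // 2) ** 2       = 256
    #   expected_seq_len = 256 // 4 ** (3 - 1)  = 16
    #   expected_dim     = 16 * 2 ** (3 - 1)    = 64
    # i.e. last_hidden_state has shape (batch_size, 16, 64).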
| 345 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for whichever of inductance, frequency, or inductive reactance is
    given as 0, using X_L = 2*pi*f*L."""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
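# Usage sketch (the function solves for whichever argument is passed as 0):
#   ind_reactance(35e-6, 1e3, 0)               -> {'reactance': ~0.21991}
#   ind_reactance(0, 1e3, 0.2199114857512855)  -> {'inductance': ~3.5e-05}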
| 370 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    """Parse a pytest summary line into (failed, success, time_spent)."""
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failures(failures_short_lines):
    """Map each failing doctest file to the first line of its error output."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
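# Toy run of the summary parser above:
#   handle_test_results("== 2 failed, 10 passed in 1:02:13 ==")
#   -> (2, 10, "1:02:13")
# The '=' guard picks expressions[-2], so the trailing "==" is skipped.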
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase_ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase_ , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
__snake_case : List[str] = get_job_links()
__snake_case : int = retrieve_available_artifacts()
__snake_case : Union[str, Any] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
__snake_case : Dict = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
__snake_case : List[Any] = github_actions_job_links.get('run_doctests')
__snake_case : Tuple = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
__snake_case : Optional[int] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
__snake_case , __snake_case , __snake_case : Optional[Any] = handle_test_results(artifact['stats'])
__snake_case : Optional[Any] = failed
__snake_case : Union[str, Any] = success
__snake_case : Union[str, Any] = time_spent[1:-1] + ', '
__snake_case : int = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
__snake_case : Optional[int] = line.replace('FAILED ', '')
__snake_case : str = line.split()[0].replace('\n', '')
if "::" in line:
__snake_case , __snake_case : Optional[Any] = line.split('::')
else:
__snake_case , __snake_case : Any = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__snake_case : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__snake_case : List[str] = all_failures[test] if test in all_failures else 'N/A'
__snake_case : Optional[Any] = failure
break
__snake_case : int = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 136 | 0 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
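
# Quick sanity check for the helpers above (illustrative, not part of the original file):
# subtracting the row-wise max before exponentiating keeps `np.exp` from overflowing
# without changing the softmax result.
#
#   logits = np.array([[1000.0, 1001.0]])
#   softmax(logits)           # array([[0.26894142, 0.73105858]]) -- no overflow
#   sigmoid(np.array([0.0]))  # array([0.5])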
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead"
                " of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
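
# A minimal usage sketch (not part of the original module): this pipeline is normally
# reached through `transformers.pipeline`; the model name and inputs below are
# illustrative assumptions.
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!")               # [{'label': ..., 'score': ...}]
#   classifier("This movie was great!", top_k=None)   # scores for every label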
| 66 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None,
        clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True,
        save_intermediate=False, show_intermediate=False, make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
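
    # Illustrative note (not part of the original file): prompts may be a "|"-separated
    # string with optional ":weight" suffixes, e.g.
    #   self.process_prompts("a sunny beach:2.0|an oil painting")
    #   -> {"prompts": ["a sunny beach", "an oil painting"], "weights": tensor([2.0, 1.0])}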
    def generate(
        self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False,
        show_final=True, save_final=True, save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
        self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''')) | 243 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
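
# Illustrative usage of the helpers above (not part of the original module); assumes
# this file is importable as `transformers.utils.logging`:
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_info()
#   logger = logging.get_logger(__name__)
#   logger.info("INFO")
#   logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")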
| 367 | import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
_SCREAMING_SNAKE_CASE = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
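
# Quick check of the helper (illustrative): torch dtype names end in their bit width,
# so the per-element size in bytes falls out of a regex on `str(dtype)`.
#
#   get_dtype_size(torch.float16)  # -> 2
#   get_dtype_size(torch.int64)    # -> 8
#   get_dtype_size(torch.bool)     # -> 0.125 (1/8 byte per element)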
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
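
# Example invocation (illustrative; the script filename and paths are placeholders):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_ckpt \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4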
| 81 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
"a": 0.08497,
"b": 0.01492,
"c": 0.02202,
"d": 0.04253,
"e": 0.11162,
"f": 0.02228,
"g": 0.02015,
"h": 0.06094,
"i": 0.07546,
"j": 0.00153,
"k": 0.01292,
"l": 0.04025,
"m": 0.02406,
"n": 0.06749,
"o": 0.07507,
"p": 0.01929,
"q": 0.00095,
"r": 0.07587,
"s": 0.06327,
"t": 0.09356,
"u": 0.02758,
"v": 0.00978,
"w": 0.02560,
"x": 0.00150,
"y": 0.01994,
"z": 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
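
# A hedged usage sketch (the driver below is not part of the original file):
# decrypting a Caesar-shifted string returns the shift, its chi-squared score,
# and the decoded text.
#
#   if __name__ == "__main__":
#       shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("crypto")
#       print(shift, round(chi_squared, 3), decoded)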
| 87 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )

        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar,
            type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether new added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
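
# Hedged usage sketch (not part of the original file): a task module subclasses
# BaseTransformer, registers its args, and hands everything to generic_train.
# `MyTaskModule` is a hypothetical subclass implementing get_dataloader etc.
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   MyTaskModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModule(args), args)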
| 316 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 169 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(UpperCAmelCase_ )] for number in range(10 )] )
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = int(chr(self.current_selection ) )
SCREAMING_SNAKE_CASE__ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCAmelCase_ )
else:
return
else:
return
def A_ ( self : Optional[int] , UpperCAmelCase_ : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
SCREAMING_SNAKE_CASE__ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCAmelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
SCREAMING_SNAKE_CASE__ = int(builtins.input() )
except ValueError:
SCREAMING_SNAKE_CASE__ = default_choice
else:
SCREAMING_SNAKE_CASE__ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(UpperCAmelCase_ , '\n' )
return choice
| 169 | 1 |
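A usage sketch for the menu class above, assuming the `cursor`/`input`/`helpers` utilities it imports are available on the path; the choice strings are illustrative:

# Hypothetical usage: render the menu and block until the user picks an entry.
menu = BulletMenu(
    "Which compute environment are you running in?",
    ["This machine", "AWS (Amazon SageMaker)"],
)
choice_index = menu.run(default_choice=0)  # returns the selected position as an int
print(f"Selected option {choice_index}")

The `run` loop has two modes: in Colab it falls back to plain `input()` because raw keypress handling is unavailable, while elsewhere the `@input.mark`-decorated handlers map arrow, number, enter, and interrupt keys onto cursor movements.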
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
'''simple docstring'''
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )
    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None , **snake_case_ ):
"""simple docstring"""
if return_tensors is None:
A_ : Any = self.framework
A_ : Dict = self.tokenizer(snake_case_ , return_tensors=snake_case_ )
self.ensure_exactly_one_mask_token(snake_case_ )
return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
| 286 |
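The pipeline above is normally reached through the `pipeline` factory rather than instantiated directly. A brief usage sketch; the checkpoint name is one public fill-mask model, any masked LM works:

from transformers import pipeline

# Each result row carries score, token id, token string, and the completed sequence.
fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
for prediction in fill_mask("Paris is the [MASK] of France.", top_k=3):
    print(f"{prediction['token_str']:>12}  {prediction['score']:.3f}")

# `targets` restricts scoring to the given candidates, via get_target_ids above.
print(fill_mask("Paris is the [MASK] of France.", targets=["capital", "center"]))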
"""simple docstring"""
import qiskit
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Tuple = qiskit.Aer.get_backend('aer_simulator' )
A_ : str = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
A_ : Optional[Any] = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
lowerCamelCase_ : List[str] = half_adder(1, 1)
print(F"Half Adder Output Qubit Counts: {counts}") | 286 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()
        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : Dict = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__, model_name="""Helsinki-NLP/opus-mt-en-de""", revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""", decode_kwargs={"""use_source_tokenizer""": True}, )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 115 |
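For reference, the tokenizer exercised by the tests above is used like any other seq2seq tokenizer. A brief sketch with a public Helsinki-NLP checkpoint; the printed shape depends on the checkpoint, and `text_target` assumes a reasonably recent transformers version:

from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tokenizer(["I am a small frog"], return_tensors="pt", padding=True)
print(batch.input_ids.shape)  # source-side pieces come from source.spm

# Target text goes through the separate target SentencePiece model instead:
labels = tokenizer(text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")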
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
A : Optional[Any] = self.get_tokenizer()
A : Optional[Any] = self.get_rust_tokenizer()
A : Optional[int] = self.get_image_processor()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ )
A : Tuple = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A : str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ )
    def test_save_load_pretrained_additional_features(self):
A : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""" )
A : Tuple = self.get_image_processor(do_normalize=lowerCamelCase__ )
A : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase__ )
    def test_image_processor(self):
A : List[Any] = self.get_image_processor()
A : str = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Optional[Any] = self.prepare_image_inputs()
A : Optional[Any] = image_processor(lowerCamelCase__, return_tensors="""np""" )
A : Any = processor(images=lowerCamelCase__, return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
    def test_tokenizer(self):
A : int = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : Optional[int] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = """lower newer"""
A : Union[str, Any] = processor(text=lowerCamelCase__, return_tensors="""np""" )
A : str = tokenizer(lowerCamelCase__, return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist() )
    def test_processor(self):
A : Tuple = self.get_image_processor()
A : int = self.get_tokenizer()
A : str = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : List[str] = """lower newer"""
A : Any = self.prepare_image_inputs()
A : Tuple = processor(text=lowerCamelCase__, images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_processor_with_text_list(self):
A : str = """google/owlvit-base-patch32"""
A : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[int] = processor(text=lowerCamelCase__ )
A : Any = 16
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_processor_with_nested_text_list(self):
A : Tuple = """google/owlvit-base-patch32"""
A : Any = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : int = [["""cat""", """nasa badge"""], ["""person"""]]
A : List[Any] = processor(text=lowerCamelCase__ )
A : Dict = 16
A : List[str] = len(lowerCamelCase__ )
A : List[str] = max([len(lowerCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_processor_case(self):
A : Dict = """google/owlvit-base-patch32"""
A : int = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
A : str = ["""cat""", """nasa badge"""]
A : Optional[Any] = processor(text=lowerCamelCase__ )
A : int = 16
A : Optional[Any] = inputs["""input_ids"""]
A : Optional[int] = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape, (2, seq_length) )
self.assertListEqual(list(input_ids[0] ), predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ), predicted_ids[1] )
    def test_processor_case2(self):
A : Tuple = self.get_image_processor()
A : Dict = self.get_tokenizer()
A : Optional[Any] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = self.prepare_image_inputs()
A : Optional[Any] = self.prepare_image_inputs()
A : List[str] = processor(images=lowerCamelCase__, query_images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ), ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
    def test_tokenizer_decode(self):
A : Any = self.get_image_processor()
A : Optional[Any] = self.get_tokenizer()
A : List[str] = OwlViTProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ )
A : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A : Optional[Any] = processor.batch_decode(lowerCamelCase__ )
A : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
| 115 | 1 |
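A short sketch of how the processor under test combines text queries and images for zero-shot detection; the checkpoint is the public OWL-ViT base model and the image URL is the COCO sample commonly used in examples, both shown purely for illustration:

import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)

# Nested text lists are padded to the largest number of queries, as exercised above.
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.input_ids.shape, inputs.pixel_values.shape)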
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 21 |
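To make the token-level F1 above concrete: with gold answer "the cat sat" and prediction "cat sat down", `normalize_answer` strips the article, so the gold reduces to "cat sat". Two of three predicted tokens match, giving precision = 2/3, recall = 1, and F1 = 0.8. The metric functions can be exercised directly:

# Tiny worked example for the metric functions defined above.
print(compute_exact("the cat sat", "cat sat down"))           # 0: normalized strings differ
print(round(compute_f1("the cat sat", "cat sat down"), 4))    # 0.8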
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 257 | 0 |
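The test above leans on the standard Flax data-parallel recipe: model parameters are broadcast to every device with `replicate`, per-device batches are produced with `shard`, and a split PRNG key gives each device its own randomness. A minimal sketch of the same pattern on a toy function, with no diffusers involved; it also runs on a single device, where the leading axis is simply 1:

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}
batch = jnp.ones((jax.device_count() * 2, 4))  # global batch, leading axis divisible by device count

p_params = replicate(params)  # one copy of the weights per device
sharded = shard(batch)        # (devices * 2, 4) -> (devices, 2, 4)
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())

@jax.pmap
def apply(params, x):
    return params["w"] * x

print(apply(p_params, sharded).shape)  # (num_devices, 2, 4)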
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def _lowerCAmelCase ( self ):
A , A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase__, lowerCamelCase__ ):
if hasattr(__a, """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__a, """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
A : Any = model_class(config=__a )
A : List[Any] = _get_word_embedding_weight(__a, model.get_input_embeddings() )
A : Dict = _get_word_embedding_weight(__a, model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__a )
A : Tuple = _get_word_embedding_weight(__a, model.get_input_embeddings() )
A : int = _get_word_embedding_weight(__a, model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
A : Optional[Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], __a )
# check that weights remain the same after resizing
A : Any = True
for pa, pa in zip(old_input_embeddings.value(), new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A : Any = False
self.assertTrue(__a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], __a )
A : Union[str, Any] = True
for pa, pa in zip(old_output_embeddings.value(), new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A : Union[str, Any] = False
self.assertTrue(__a )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase ( self ):
A : Any = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
A : List[Any] = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
A : List[Any] = tf.not_equal(__a, model.config.pad_token_id )
with tf.GradientTape():
A : Optional[Any] = model(input_ids=__a, attention_mask=__a ).last_hidden_state
A : Dict = (1, 11, 512)
self.assertEqual(output.shape, __a )
A : int = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3], __a, atol=4e-3 ) )
A : Dict = tf.function(__a, jit_compile=__a )
A : str = xla_generate(__a, __a )[0]
self.assertTrue(np.allclose(output[:, :3, :3], __a, atol=4e-2 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
super().setUp()
A : Union[str, Any] = """facebook/opt-350m"""
def _lowerCAmelCase ( self ):
A : str = TFOPTForCausalLM.from_pretrained(self.path_model )
A : Optional[int] = GPTaTokenizer.from_pretrained(self.path_model )
A : Any = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
A : Optional[int] = tokenizer(__a, return_tensors="""tf""", padding=__a, add_special_tokens=__a )
A : List[Any] = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
A : str = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(__a, __a, atol=1e-4 ) )
A : List[str] = tf.function(__a, jit_compile=__a )
A : List[str] = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 )
self.assertTrue(np.allclose(__a, __a, atol=1e-4 ) )
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCAmelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowerCAmelCase ( self ):
A : Optional[int] = """facebook/opt-125m"""
A : Optional[Any] = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A : Optional[int] = []
A : Union[str, Any] = GPTaTokenizer.from_pretrained(__a )
A : Optional[Any] = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
A : Optional[Any] = tokenizer(__a, return_tensors="""tf""" ).input_ids
A : int = model.generate(__a, max_length=10 )
A : str = tokenizer.batch_decode(__a, skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a, __a )
def _lowerCAmelCase ( self ):
A : Optional[Any] = """facebook/opt-350m"""
A : Any = GPTaTokenizer.from_pretrained(__a )
A : Optional[int] = TFOPTForCausalLM.from_pretrained(__a )
A : List[Any] = """left"""
# use different length sentences to test batching
A : List[Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A : str = tokenizer(__a, return_tensors="""tf""", padding=__a )
A : Optional[int] = inputs["""input_ids"""]
A : Optional[int] = model.generate(input_ids=__a, attention_mask=inputs["""attention_mask"""] )
A : Tuple = tokenizer(sentences[0], return_tensors="""tf""" ).input_ids
A : Optional[Any] = model.generate(input_ids=__a )
A : Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1], tf.intaa ) )
A : Optional[Any] = tokenizer(sentences[1], return_tensors="""tf""" ).input_ids
A : int = model.generate(input_ids=__a, max_length=model.config.max_length - num_paddings )
A : Optional[Any] = tokenizer.batch_decode(__a, skip_special_tokens=__a )
A : str = tokenizer.decode(output_non_padded[0], skip_special_tokens=__a )
A : Optional[int] = tokenizer.decode(output_padded[0], skip_special_tokens=__a )
A : List[Any] = [
"""Hello, my dog is a little bit of a dork.\nI\'m a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(__a, __a )
self.assertListEqual(__a, [non_padded_sentence, padded_sentence] )
def _lowerCAmelCase ( self ):
A : List[str] = """facebook/opt-350m"""
A : Any = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A : str = []
A : Optional[int] = GPTaTokenizer.from_pretrained(__a )
A : Tuple = TFOPTForCausalLM.from_pretrained(__a )
for prompt in self.prompts:
A : Optional[int] = tokenizer(__a, return_tensors="""tf""" ).input_ids
A : str = model.generate(__a, max_length=10 )
A : List[str] = tokenizer.batch_decode(__a, skip_special_tokens=__a )
predicted_outputs += generated_string
self.assertListEqual(__a, __a )
| 362 |
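One detail the batched-generation test above depends on: decoder-only models such as OPT must be padded on the left, otherwise freshly generated tokens sit after pad positions and the continuations degrade. A usage sketch, assuming the public facebook/opt-125m checkpoint (which ships a GPT-2 style tokenizer with a pad token):

from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
tokenizer.padding_side = "left"  # required for batched generation with causal LMs
model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")

inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
outputs = model.generate(
    input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=16
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))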
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Union[str, Any] = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Dict = "conditional_detr"
__lowerCamelCase : str = ["past_key_values"]
__lowerCamelCase : str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self, lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__=3, lowerCamelCase__=300, lowerCamelCase__=6, lowerCamelCase__=2048, lowerCamelCase__=8, lowerCamelCase__=6, lowerCamelCase__=2048, lowerCamelCase__=8, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=True, lowerCamelCase__="relu", lowerCamelCase__=256, lowerCamelCase__=0.1, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.02, lowerCamelCase__=1.0, lowerCamelCase__=False, lowerCamelCase__="sine", lowerCamelCase__="resnet50", lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=2, lowerCamelCase__=5, lowerCamelCase__=2, lowerCamelCase__=1, lowerCamelCase__=1, lowerCamelCase__=2, lowerCamelCase__=5, lowerCamelCase__=2, lowerCamelCase__=0.25, **lowerCamelCase__, ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
A : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__, lowerCamelCase__ ):
A : Any = backbone_config.get("""model_type""" )
A : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
A : Tuple = config_class.from_dict(lowerCamelCase__ )
A : Dict = use_timm_backbone
A : int = backbone_config
A : Union[str, Any] = num_channels
A : Optional[Any] = num_queries
A : Union[str, Any] = d_model
A : str = encoder_ffn_dim
A : List[Any] = encoder_layers
A : Tuple = encoder_attention_heads
A : Union[str, Any] = decoder_ffn_dim
A : Tuple = decoder_layers
A : int = decoder_attention_heads
A : Union[str, Any] = dropout
A : List[str] = attention_dropout
A : Optional[int] = activation_dropout
A : Optional[Any] = activation_function
A : Any = init_std
A : List[Any] = init_xavier_std
A : Any = encoder_layerdrop
A : List[str] = decoder_layerdrop
A : int = encoder_layers
A : Union[str, Any] = auxiliary_loss
A : Union[str, Any] = position_embedding_type
A : Tuple = backbone
A : Dict = use_pretrained_backbone
A : int = dilation
# Hungarian matcher
A : List[Any] = class_cost
A : List[Any] = bbox_cost
A : int = giou_cost
# Loss coefficients
A : List[Any] = mask_loss_coefficient
A : Any = dice_loss_coefficient
A : int = cls_loss_coefficient
A : Tuple = bbox_loss_coefficient
A : List[Any] = giou_loss_coefficient
A : int = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase__, **lowerCamelCase__ )
@property
def _lowerCAmelCase ( self ):
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ):
return self.d_model
def _lowerCAmelCase ( self ):
A : Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A : List[Any] = self.backbone_config.to_dict()
A : List[str] = self.__class__.model_type
return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> "OrderedDict[str, Mapping[int, str]]":
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
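# A minimal usage sketch for the config class above (illustrative; it assumes
# a transformers install that exposes ConditionalDetrConfig, and the values
# mirror the defaults defined in this file):
if __name__ == "__main__":
    from transformers import ConditionalDetrConfig

    config = ConditionalDetrConfig()
    assert config.hidden_size == config.d_model == 256  # resolved via attribute_map
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    assert config.to_dict()["model_type"] == "conditional_detr"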
| 115 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
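# A toy sketch of the lazy-import pattern this init file relies on
# (illustrative only; this is not the transformers _LazyModule implementation):
from types import ModuleType


class ToyLazyModule(ModuleType):
    """Defer submodule imports until an attribute is first requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        import importlib

        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, symbol)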
| 51 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset, alternating examples between them."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset, stacking their rows or columns along `axis`."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
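# A quick usage sketch of the two helpers above (assumes the `datasets`
# library is installed; outputs shown in comments):
if __name__ == "__main__":
    from datasets import Dataset

    d1 = Dataset.from_dict({"x": [0, 1, 2]})
    d2 = Dataset.from_dict({"x": [10, 11, 12]})
    # with no probabilities, sources are visited round-robin until the first is exhausted
    print(interleave_datasets([d1, d2])["x"])  # [0, 10, 1, 11, 2, 12]
    # rows stacked end to end along axis 0
    print(concatenate_datasets([d1, d2])["x"])  # [0, 1, 2, 10, 11, 12]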
| 51 | 1 |
"""simple docstring"""
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # walk down the attribute path ("encoder.layers.3.attention.k_proj" -> module)
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # recover the encoder layer index from the fairseq key
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the fairseq model's weights into the transformers design.
    """
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
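# A pure-Python miniature of the MAPPING-driven renaming above (toy names, not
# real checkpoint keys), showing how the "*" layer index is recovered:
def _demo_rename():
    toy_mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
    toy_state = {"encoder.layers.3.self_attn.k_proj.weight": None}
    for name in toy_state:
        for key, mapped_key in toy_mapping.items():
            if key in name:
                layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
                print(mapped_key.replace("*", layer_index))  # encoder.layers.3.attention.k_proj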
| 355 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCamelCase = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCamelCase = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""clip-base""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self ):
"""simple docstring"""
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
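# A toy sketch of the register/lookup pattern these tests exercise
# (hypothetical names; not the transformers registry implementation):
_TOY_REGISTRY = {}


def register_image_processor(model_type, processor_cls):
    if model_type in _TOY_REGISTRY:
        raise ValueError(f"{model_type} is already registered")
    _TOY_REGISTRY[model_type] = processor_cls


if __name__ == "__main__":
    register_image_processor("custom", CustomImageProcessor)
    assert _TOY_REGISTRY["custom"] is CustomImageProcessor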
| 168 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
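# The same "import a script as a module, patch sys.argv, call its entry point"
# trick in isolation (a sketch; the script path and `_mp_fn` entry name are
# assumptions mirroring the launcher above):
def run_script_entry(script_path, script_args, entry="_mp_fn"):
    import importlib.util

    spec = importlib.util.spec_from_file_location("user_script", script_path)
    module = importlib.util.module_from_spec(spec)
    sys.argv = [script_path] + list(script_args)  # the script sees these as its CLI args
    spec.loader.exec_module(module)
    getattr(module, entry)(0)  # the index argument mimics what xmp.spawn passes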
| 159 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
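# The disable/restore pattern used in evaluate()/predict() above, in miniature
# (a self-contained sketch, not transformers code):
class _TinyEvaluator:
    def __init__(self, compute_metrics):
        self.compute_metrics = compute_metrics

    def evaluate(self, batches):
        compute_metrics = self.compute_metrics
        self.compute_metrics = None  # the inner loop must not compute metrics itself
        try:
            predictions = [b * 2 for b in batches]  # stand-in for the eval loop
        finally:
            self.compute_metrics = compute_metrics  # restored even if the loop raises
        return self.compute_metrics(predictions)


if __name__ == "__main__":
    print(_TinyEvaluator(sum).evaluate([1, 2, 3]))  # -> 12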
| 19 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
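# A hedged sketch of the same version gate built directly on `packaging`
# (illustrative; `require_at_least` is not the transformers helper):
def require_at_least(pkg, minimum):
    from importlib.metadata import version as installed_version

    from packaging import version

    if version.parse(installed_version(pkg)) < version.parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {installed_version(pkg)}")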
| 35 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
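# What speed_metrics contributes, in miniature: throughput derived from
# wall-clock runtime (an illustrative sketch, not the transformers helper):
def toy_speed_metrics(prefix, start_time, num_samples, total_batch_size):
    runtime = time.time() - start_time
    num_steps = math.ceil(num_samples / total_batch_size)
    return {
        f"{prefix}_runtime": round(runtime, 4),
        f"{prefix}_samples_per_second": round(num_samples / runtime, 3),
        f"{prefix}_steps_per_second": round(num_steps / runtime, 3),
    }


if __name__ == "__main__":
    start = time.time() - 2.0  # pretend evaluation took two seconds
    print(toy_speed_metrics("eval", start, num_samples=100, total_batch_size=16))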
| 248 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case : List[Any] = logging.get_logger(__name__)
class BlipImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
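# The core pixel math preprocess() applies, written out directly in NumPy
# (illustrative; the constants are the OPENAI_CLIP mean/std defaults used above):
if __name__ == "__main__":
    image = np.random.randint(0, 256, (384, 384, 3)).astype(np.float32)
    image = image * (1 / 255)  # rescale step
    mean = np.array([0.48145466, 0.4578275, 0.40821073])  # OPENAI_CLIP_MEAN
    std = np.array([0.26862954, 0.26130258, 0.27577711])  # OPENAI_CLIP_STD
    image = (image - mean) / std  # normalize step
    image = image.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST
    print(image.shape)  # (3, 384, 384)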
| 248 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
def A__ ( self: Union[str, Any] ) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : Tuple = self.dist_env.copy()
UpperCAmelCase_ : int = F'''{i + 1}'''
UpperCAmelCase_ : Optional[int] = strategy
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def A__ ( self: List[Any] ) -> Dict:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : int = self.dist_env.copy()
UpperCAmelCase_ : str = prefetch_policy
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : Tuple = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def A__ ( self: List[str] ) -> Any:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : List[str] = self.dist_env.copy()
UpperCAmelCase_ : Any = state_dict_type
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : int = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ : Tuple = AutoModel.from_pretrained(lowerCamelCase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
UpperCAmelCase_ : Tuple = self.dist_env.copy()
UpperCAmelCase_ : Any = policy
if policy == "TRANSFORMER_BASED_WRAP":
UpperCAmelCase_ : str = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
UpperCAmelCase_ : Tuple = """2000"""
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
UpperCAmelCase_ : Dict = self.dist_env.copy()
UpperCAmelCase_ : List[str] = """TRANSFORMER_BASED_WRAP"""
UpperCAmelCase_ : str = """T5Layer"""
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : Tuple = FullyShardedDataParallelPlugin()
with self.assertRaises(lowerCamelCase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase_ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
UpperCAmelCase_ : Optional[int] = self.dist_env.copy()
UpperCAmelCase_ : int = """SIZE_BASED_WRAP"""
UpperCAmelCase_ : List[Any] = """0"""
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowerCamelCase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
def A__ ( self: List[Any] ) -> Union[str, Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
UpperCAmelCase_ : Any = self.dist_env.copy()
UpperCAmelCase_ : Dict = str(lowerCamelCase_ ).lower()
with mockenv_context(**lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=lowerCamelCase_ ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def A__ ( self: Dict ) -> Dict:
UpperCAmelCase_ : str = os.path.join(self.test_scripts_folder ,"""test_performance.py""" )
UpperCAmelCase_ : Optional[int] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
UpperCAmelCase_ : int = cmd.copy()
for i, strategy in enumerate(lowerCamelCase_ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
def A__ ( self: List[Any] ) -> Dict:
UpperCAmelCase_ : Dict = os.path.join(self.test_scripts_folder ,"""test_checkpointing.py""" )
UpperCAmelCase_ : Tuple = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(lowerCamelCase_ ):
UpperCAmelCase_ : Any = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
UpperCAmelCase_ : List[Any] = len(lowerCamelCase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
UpperCAmelCase_ : Union[str, Any] = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
UpperCAmelCase_ : Union[str, Any] = cmd_config[:-1]
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdir ,"""epoch_0""" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
def A__ ( self: Dict ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.test_scripts_folder ,"""test_peak_memory_usage.py""" )
UpperCAmelCase_ : List[Any] = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
UpperCAmelCase_ : Union[str, Any] = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(lowerCamelCase_ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCamelCase_ ,env=os.environ.copy() )
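# A minimal stand-in for the mockenv_context helper these tests lean on:
# patch os.environ for the duration of a block (an illustrative sketch, not
# the accelerate implementation):
from contextlib import contextmanager


@contextmanager
def toy_mockenv(**env):
    previous = {key: os.environ.get(key) for key in env}
    os.environ.update({key: str(value) for key, value in env.items()})
    try:
        yield
    finally:
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)  # variable did not exist before
            else:
                os.environ[key] = value


if __name__ == "__main__":
    with toy_mockenv(ACCELERATE_USE_FSDP="true", FSDP_SHARDING_STRATEGY="1"):
        assert os.environ["ACCELERATE_USE_FSDP"] == "true"
    assert "ACCELERATE_USE_FSDP" not in os.environ or os.environ["ACCELERATE_USE_FSDP"] != "true"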
| 59 |
import os
def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum (Project Euler 82): move only right, up, or
    down from any cell in the leftmost column to any cell in the rightmost.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First pass: reach column j by moving right only.
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # Second pass: relax downward moves within column j.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # Third pass: relax upward moves within column j.
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"{solution() = }")
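# Worked example of the same three-pass DP on the 5x5 grid from the problem
# statement (expected minimal path sum: 994); kept as a sketch function so it
# does not run on import:
def _demo() -> None:
    matrix = [
        [131, 673, 234, 103, 18],
        [201, 96, 342, 965, 150],
        [630, 803, 746, 422, 111],
        [537, 699, 497, 121, 956],
        [805, 732, 524, 37, 331],
    ]
    sums = [row[0] for row in matrix]
    for j in range(1, len(matrix[0])):
        sums = [sums[i] + matrix[i][j] for i in range(len(matrix))]  # move right
        for i in range(1, len(matrix)):  # then relax downward moves
            sums[i] = min(sums[i], sums[i - 1] + matrix[i][j])
        for i in range(len(matrix) - 2, -1, -1):  # then relax upward moves
            sums[i] = min(sums[i], sums[i + 1] + matrix[i][j])
    print(min(sums))  # 994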
| 59 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
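# Hedged sketch (not the transformers implementation): the lazy-import pattern the
# module above relies on. Attribute access triggers the real submodule import, so
# importing the package itself stays cheap. `TinyLazyModule` is a made-up name.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Find which submodule exports `attr`, import it on first use, and delegate.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")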
| 25 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generate boolean masks for the masked-image-modeling pretraining task,
    one entry per model patch, where 1 means "masked"."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        # Pick `mask_count` random mask patches, then upscale to model-patch resolution.
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
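# Hedged usage sketch for MaskGenerator (defaults shown above): the returned mask is a
# flat tensor with one entry per model patch, (input_size // model_patch_size) ** 2 = 2304
# entries for the defaults, of which roughly `mask_ratio` are 1:
#
#   mask = MaskGenerator()()
#   mask.shape            # torch.Size([2304])
#   mask.float().mean()   # ~0.61 (22 of 36 mask patches, each covering scale**2 entries)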
def collate_fn(examples):
    """Stack per-example pixel values and boolean masks into a single batch."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
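# Hedged sketch of what collate_fn consumes and produces (shapes for the defaults above;
# `mask_generator` stands in for the MaskGenerator instance built later in main()):
#
#   examples = [{"pixel_values": torch.rand(3, 192, 192), "mask": mask_generator()} for _ in range(2)]
#   batch = collate_fn(examples)
#   batch["pixel_values"].shape     # (2, 3, 192, 192)
#   batch["bool_masked_pos"].shape  # (2, 2304)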
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )
    def preprocess_images(examples):
        """Apply the train transforms to a batch of images and attach a fresh mask per image."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
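# Hedged usage sketch: a typical invocation of this script (values illustrative):
#
#   python run_mim.py --model_type vit --dataset_name cifar10 \
#       --do_train --do_eval --output_dir ./simmim-outputs --overwrite_output_dir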
| 79 | 0 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char binary string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 hex characters in little-endian byte order."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """MD5 padding: append a 1 bit, zero-pad to 448 mod 512, then the 64-bit length."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the padded bit string into 512-char blocks of 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT over 32 bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
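# Hedged quick check of left_rotate_32: rotating bit 0 left by 31 reaches the top bit,
# and rotating the top bit left by 1 wraps back around:
#
#   left_rotate_32(1, 31) == 0x80000000
#   left_rotate_32(0x80000000, 1) == 1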
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of `message`, as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xEF_CD_AB_89
    c0 = 0x98_BA_DC_FE
    d0 = 0x10_32_54_76

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
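# Hedged sanity check against the standard library (uses the cleaned-up names above):
#
#   import hashlib
#   assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")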
| 5 |
'''simple docstring'''
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode a plain-text message as Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """Decode a Morse-code string back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
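# Hedged round-trip sketch for the two functions above:
#
#   assert encrypt("SOS") == "... --- ..."
#   assert decrypt("... --- ...") == "SOS"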
| 5 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case :
"""simple docstring"""
def __init__( self , UpperCamelCase , UpperCamelCase=13 , UpperCamelCase=10 , UpperCamelCase=3 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=2 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=10 , UpperCamelCase=0.02 , UpperCamelCase=0.9 , UpperCamelCase=None , ):
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = patch_size
lowerCamelCase_ = tubelet_size
lowerCamelCase_ = num_frames
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = mask_ratio
lowerCamelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCamelCase_ = int(mask_ratio * self.seq_length )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCamelCase_ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.batch_size , -1 ).bool()
lowerCamelCase_ = model(UpperCamelCase , UpperCamelCase )
# model only returns predictions for masked patches
lowerCamelCase_ = mask.sum().item()
lowerCamelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
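# Hedged sketch of the per-video boolean mask built in create_and_check_for_pretraining
# above: every example in the batch shares one mask of `num_masks` ones followed by zeros:
#
#   mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#   bool_masked_pos = mask.expand(batch_size, -1).bool()   # shape (batch_size, seq_length)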
@require_torch
class snake_case ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_lowerCamelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCamelCase_ = copy.deepcopy(UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCamelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCamelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCamelCase_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
lowerCamelCase_ = bool_masked_pos.to(UpperCamelCase )
if return_labels:
if model_class in [
*get_values(UpperCamelCase ),
]:
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds" )
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCamelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = True
for model_class in self.all_model_classes:
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowerCamelCase_ = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
lowerCamelCase_ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
lowerCamelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCamelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self ):
"""simple docstring"""
pass
def __snake_case ( ):
lowerCamelCase_ = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
lowerCamelCase_ = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to(
UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCamelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(UpperCamelCase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_video()
lowerCamelCase_ = image_processor(UpperCamelCase , return_tensors="pt" ).to(UpperCamelCase )
# add boolean mask, indicating which patches to mask
lowerCamelCase_ = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
lowerCamelCase_ = torch.load(UpperCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
# verify the logits
lowerCamelCase_ = torch.Size([1, 1408, 1536] )
lowerCamelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCamelCase_ = torch.tensor([0.5_142] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCamelCase_ = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=UpperCamelCase ).to(
UpperCamelCase )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCamelCase )
        lowerCamelCase_ = torch.tensor([0.6_469], device=UpperCamelCase)
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
| 55 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
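# Hedged rendering sketch: what the custom `hf_table_format` defined at the top of this
# script produces (row values made up):
#
#   print(tabulate([["tests/test_a.py", 2]], headers=["Test Location", "Num Failed"], tablefmt=hf_table_format))
#   | Test Location   | Num Failed |
#   | tests/test_a.py | 2          |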
| 12 | 0 |
def is_palindrome_number(num: int) -> bool:
    """Return True if the given non-negative integer reads the same reversed."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
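# Hedged usage sketch for the function above:
#
#   is_palindrome_number(121)   # True
#   is_palindrome_number(123)   # False
#   is_palindrome_number(-121)  # False (the sign breaks the palindrome)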
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 351 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase : List[str] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f': {x}: ' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.')
    return selected_warnings
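# Hedged sketch of the `f": {x}: "` target matching used in parse_line above,
# on a fabricated pytest warning line:
#
#   sample = "src/foo.py:12: DeprecationWarning: `bar` is deprecated"
#   any(f": {x}: " in sample for x in ["DeprecationWarning", "FutureWarning"])  # True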
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files in `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
lowercase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
lowercase : Union[str, Any] = parser.parse_args()
lowercase : Tuple = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowercase : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowercase : Any = extract_warnings(args.output_dir, args.targets)
lowercase : int = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 151 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
    def test_added_tokens_do_lower_case(self):
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def test_pretokenized_inputs(self):
pass
| 342 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1_024,
}
__magic_name__: int = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
__magic_name__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__magic_name__ : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
__magic_name__ : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__magic_name__ : List[str] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__magic_name__ : List[Any] = 1
__magic_name__ : Dict = len(self.sp_model )
__magic_name__ : int = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__ )
}
__magic_name__ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()}
__magic_name__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__magic_name__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__magic_name__ : List[str] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__magic_name__ : List[Any] = src_lang if src_lang is not None else """eng_Latn"""
__magic_name__ : Any = self.lang_code_to_id[self._src_lang]
__magic_name__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
__magic_name__ : List[Any] = self.__dict__.copy()
__magic_name__ : int = None
__magic_name__ : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Any:
__magic_name__ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ : Any = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def __magic_name__ ( self ) -> str:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __magic_name__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , lowerCAmelCase__ ) -> None:
__magic_name__ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
__magic_name__ : Optional[int] = [1] * len(self.prefix_tokens )
__magic_name__ : Any = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : str = [self.sep_token_id]
__magic_name__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
| 342 | 1 |
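The special-token placement above is easier to see in isolation. A minimal sketch in plain Python, using made-up token ids rather than the real NLLB vocabulary, of how the legacy flag moves the language code from suffix to prefix:

# Hypothetical ids: 2 stands in for </s>, 256047 for a language code.
def wrap_with_lang_tokens(token_ids, lang_code_id, eos_id=2, legacy_behaviour=False):
    if legacy_behaviour:
        prefix, suffix = [], [eos_id, lang_code_id]   # tokens ... </s> lang_code
    else:
        prefix, suffix = [lang_code_id], [eos_id]     # lang_code tokens ... </s>
    return prefix + token_ids + suffix

print(wrap_with_lang_tokens([10, 11, 12], 256047))                         # [256047, 10, 11, 12, 2]
print(wrap_with_lang_tokens([10, 11, 12], 256047, legacy_behaviour=True))  # [10, 11, 12, 2, 256047]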
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
A__ : Optional[Any] = XGLMConfig
A__ : int = {}
A__ : Tuple = "gelu"
def __init__(self : List[str] , snake_case__ : List[str] , snake_case__ : Optional[Any]=14 , snake_case__ : Optional[Any]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=99 , snake_case__ : Union[str, Any]=32 , snake_case__ : str=2 , snake_case__ : List[Any]=4 , snake_case__ : Any=37 , snake_case__ : List[Any]="gelu" , snake_case__ : int=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Dict=5_12 , snake_case__ : Any=0.02 , ) -> Any:
'''simple docstring'''
snake_case : Any = parent
snake_case : Optional[Any] = batch_size
snake_case : str = seq_length
snake_case : Any = is_training
snake_case : Optional[int] = use_input_mask
snake_case : Tuple = use_labels
snake_case : int = vocab_size
snake_case : List[str] = d_model
snake_case : List[Any] = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Tuple = ffn_dim
snake_case : Dict = activation_function
snake_case : int = activation_dropout
snake_case : List[str] = attention_dropout
snake_case : Optional[int] = max_position_embeddings
snake_case : Tuple = initializer_range
snake_case : str = None
snake_case : int = 0
snake_case : Dict = 2
snake_case : List[Any] = 1
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Optional[int]:
'''simple docstring'''
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
    def _SCREAMING_SNAKE_CASE (self : Any ) -> List[str]:
        '''simple docstring'''
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case__ , )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> Optional[int]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids , input_mask , head_mask) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A__ : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
A__ : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else ()
A__ : List[Any] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
A__ : int = False
A__ : List[str] = False
A__ : List[str] = False
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
'''simple docstring'''
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
def _SCREAMING_SNAKE_CASE (self : str ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : List[str] = TFXGLMModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def _SCREAMING_SNAKE_CASE (self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest ( unittest.TestCase ):
@slow
    def _SCREAMING_SNAKE_CASE (self : Tuple , verify_outputs : bool=True ) -> int:
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tf.random.set_seed(0 )
        tokenized = tokenizer("Today is a nice day and" , return_tensors="tf" )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0" ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences , return_tensors="tf" , padding=True )
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 10 |
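The batched-generation test above depends on left padding. A toy illustration, with made-up ids unrelated to any real tokenizer, of why decoder-only models pad on the left:

pad = 0
prompts = [[5, 6, 7, 8], [5, 6]]
# Left padding keeps every prompt flush against the position where new tokens
# are appended; right padding would leave pad tokens between prompt and output.
left_padded = [[pad] * (4 - len(p)) + p for p in prompts]
right_padded = [p + [pad] * (4 - len(p)) for p in prompts]
print(left_padded)   # [[5, 6, 7, 8], [0, 0, 5, 6]]
print(right_padded)  # [[5, 6, 7, 8], [5, 6, 0, 0]]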
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize : int , sigma : int , theta : int , lambd : int , gamma : int , psi : int ):
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )
    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("""../image_data/lena.jpg""")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 1_20, 1_50]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 2_55
    out = out.astype(np.uint8)
    imshow("""Original""", gray)
    imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
    waitKey(0)
| 10 | 1 |
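The hand-rolled kernel above can be cross-checked against OpenCV's built-in Gabor kernel. A hedged sketch, assuming OpenCV is installed and that `gabor_filter_kernel` from the snippet above is in scope; note that cv2.getGaborKernel takes theta in radians and ksize as a (w, h) tuple, and that psi=0 keeps the kernel symmetric so the two index conventions agree:

import numpy as np
import cv2

ksize, sigma, theta_deg, lambd, gamma, psi = 11, 8, 30, 10, 0.5, 0
ours = gabor_filter_kernel(ksize, sigma, theta_deg, lambd, gamma, psi)
ref = cv2.getGaborKernel((ksize, ksize), sigma, np.deg2rad(theta_deg), lambd, gamma, psi=psi)
print(np.abs(ours - ref).max())  # expected to be near machine epsilon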
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
lowercase_ = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class A ( pl.Callback ):
"""simple docstring"""
def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : str )-> Any:
'''simple docstring'''
A__ = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def snake_case__ ( self : Tuple,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : int,lowercase_ : Tuple=True )-> str:
'''simple docstring'''
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
A__ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
A__ = Path(pl_module.hparams.output_dir )
if type_path == "test":
A__ = od / 'test_results.txt'
A__ = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A__ = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
A__ = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a,'a+' ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
A__ = metrics[key]
if isinstance(_a,torch.Tensor ):
A__ = val.item()
A__ = F'{key}: {val:.6f}\n'
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
A__ = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_a )
@rank_zero_only
def snake_case__ ( self : Any,lowercase_ : List[str],lowercase_ : Tuple )-> Union[str, Any]:
'''simple docstring'''
try:
A__ = pl_module.model.model.num_parameters()
except AttributeError:
A__ = pl_module.model.num_parameters()
A__ = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def snake_case__ ( self : Any,lowercase_ : Optional[Any],lowercase_ : Any )-> Tuple:
'''simple docstring'''
save_json(pl_module.metrics,pl_module.metrics_save_path )
return self._write_logs(_a,_a,'test' )
@rank_zero_only
def snake_case__ ( self : Optional[Any],lowercase_ : Dict,lowercase_ : List[Any] )-> Union[str, Any]:
'''simple docstring'''
save_json(pl_module.metrics,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 7 |
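The parameter-counting helper above can be exercised standalone on any torch module. A minimal sketch, assuming torch is installed:

import torch.nn as nn

# nn.Linear(10, 4) has 10*4 weights + 4 biases = 44 trainable parameters
model = nn.Linear(10, 4)
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_params)  # 44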
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase : Optional[int] = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
lowerCAmelCase : Union[str, Any] = """▁"""
# Segments (not really needed)
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : List[Any] = 4
class XLNetTokenizerFast ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 291 | 0 |
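An illustration with toy ids (99 and 100 standing in for the real <sep> and <cls> ids, which are not in the snippet) of the end-positioned special tokens that the methods above encode:

sep, cls = [99], [100]            # hypothetical ids
a, b = [1, 2, 3], [4, 5]
print(a + sep + cls)              # single sequence: A <sep> <cls>
print(a + sep + b + sep + cls)    # pair: A <sep> B <sep> <cls>
# token type ids: segment 0 for A, 1 for B, and the fixed segment 2 for <cls>
print(len(a + sep) * [0] + len(b + sep) * [1] + [2])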
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
__A = ["input_features"]
def __init__( self : Any , lowercase : Tuple=80 , lowercase : Optional[int]=16_000 , lowercase : Optional[Any]=160 , lowercase : Optional[int]=30 , lowercase : List[Any]=400 , lowercase : Dict=0.0 , lowercase : Tuple=False , **lowercase : Optional[int] , ):
"""simple docstring"""
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
lowercase_ :Optional[int] = n_fft
lowercase_ :List[Any] = hop_length
lowercase_ :Tuple = chunk_length
lowercase_ :List[str] = chunk_length * sampling_rate
lowercase_ :Optional[Any] = self.n_samples // hop_length
lowercase_ :Any = sampling_rate
lowercase_ :List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowercase , norm="slaney" , mel_scale="slaney" , )
def lowercase__ ( self : str , lowercase : np.array ):
"""simple docstring"""
lowercase_ :Any = spectrogram(
lowercase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
lowercase_ :Any = log_spec[:, :-1]
lowercase_ :List[Any] = np.maximum(lowercase , log_spec.max() - 8.0 )
lowercase_ :Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase__ ( lowercase : List[np.ndarray] , lowercase : List[np.ndarray] , lowercase : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
            lowercase_ :Optional[int] = np.array(lowercase , np.int32 )
lowercase_ :Any = []
for vector, length in zip(lowercase , attention_mask.sum(-1 ) ):
lowercase_ :Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowercase_ :List[Any] = padding_value
normed_input_values.append(lowercase )
else:
lowercase_ :List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Tuple , lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase : bool = True , lowercase : Optional[int] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : Optional[bool] = None , lowercase : Optional[str] = "max_length" , lowercase : Optional[int] = None , lowercase : Optional[int] = None , lowercase : Optional[bool] = None , **lowercase : Union[str, Any] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase_ :List[str] = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowercase_ :Optional[Any] = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
            lowercase_ :Any = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
            lowercase_ :List[Any] = np.asarray(lowercase , dtype=np.float32 )
        elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            lowercase_ :Union[str, Any] = raw_speech.astype(np.float32 )
# always return batch
if not is_batched:
lowercase_ :Optional[int] = [np.asarray([raw_speech] ).T]
lowercase_ :int = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowercase_ :Tuple = self.pad(
lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase_ :Union[str, Any] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
lowercase_ :List[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
lowercase_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
lowercase_ :List[str] = [self._np_extract_fbank_features(lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase ):
lowercase_ :Tuple = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_features]
else:
lowercase_ :Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase_ :Dict = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowercase_ :Tuple = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase_ :List[str] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 147 |
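A minimal numpy sketch of the dynamic-range compression applied in the log-mel step above, on synthetic data rather than a real filter-bank output: clamp the log spectrogram to within 8 of its maximum, then shift and rescale.

import numpy as np

log_spec = np.log10(np.maximum(np.random.rand(80, 100), 1e-10))
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)   # clamp dynamic range
log_spec = (log_spec + 4.0) / 4.0                       # shift and rescale
print(log_spec.min(), log_spec.max())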
'''simple docstring'''
def gnome_sort( lst : list ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(gnome_sort(unsorted))
| 147 | 1 |
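A quick check of the restored gnome_sort above (pure Python, no I/O needed):

print(gnome_sort([34, 2, 10, -9]))  # [-9, 2, 10, 34]
print(gnome_sort([]))               # []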
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : str = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    """simple docstring"""
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print('Predicted class:' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}" )
        model.push_to_hub(f"ybelkada/{vit_name}" )
        processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
lowercase__ : Any = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 224 |
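A minimal torch sketch of the qkv split performed in read_in_q_k_v above: timm stores one fused (3*hidden, hidden) projection, while the HF layout wants separate query/key/value matrices. Toy sizes only:

import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)   # fused qkv, as in timm
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
print(q.shape, k.shape, v.shape)  # three (4, 4) slices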
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """dpr"""
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 224 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a = logging.get_logger(__name__)
_a = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'convnextv2'
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=2_24 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
| 100 |
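A pure-Python sketch of the stage bookkeeping in the config above: the stage names are derived from the depths list, one entry per stage plus the stem.

depths = [3, 3, 9, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']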
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort( collection: list, n: int ):
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1 )
    rec_insertion_sort(collection, n - 1 )
def insert_next( collection: list, index: int ):
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1 )
if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 100 | 1 |
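A quick in-place check of the restored recursive insertion sort above:

nums = [5, 3, 1, 4, 2]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 3, 4, 5]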
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro" , tgt_lang="en" , dataset="wmt16" , save_dir=None ) -> None:
    '''simple docstring'''
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'''{src_lang}-{tgt_lang}'''
    print(f'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'''Splitting {split} with {ds[split].num_rows} records''' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'''{fn}.source''' )
        tgt_path = save_dir.joinpath(f'''{fn}.target''' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
 | 97 |
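A hedged usage sketch for the restored script above; it downloads real data, so network access and `pip install datasets fire tqdm` are assumed, and the script filename is hypothetical:

# from the command line (assuming the file is saved as download_wmt.py):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# or directly from Python:
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")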
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 92 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file , repo_path='''shi-labs/oneformer_demo''' ):
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class __UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Optional[int], __A : int, __A : str=7, __A : Tuple=3, __A : Tuple=3_0, __A : Optional[Any]=4_0_0, __A : Tuple=None, __A : Union[str, Any]=True, __A : Union[str, Any]=True, __A : Optional[int]=[0.5, 0.5, 0.5], __A : Any=[0.5, 0.5, 0.5], __A : str=1_0, __A : Optional[Any]=False, __A : Tuple=2_5_5, __A : Any="shi-labs/oneformer_demo", __A : int="ade20k_panoptic.json", __A : Union[str, Any]=1_0, ):
UpperCAmelCase : List[str] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Any = num_channels
UpperCAmelCase : List[Any] = min_resolution
UpperCAmelCase : Optional[Any] = max_resolution
UpperCAmelCase : Optional[Any] = do_resize
UpperCAmelCase : Optional[int] = {'''shortest_edge''': 3_2, '''longest_edge''': 1_3_3_3} if size is None else size
UpperCAmelCase : Union[str, Any] = do_normalize
UpperCAmelCase : List[Any] = image_mean
UpperCAmelCase : List[Any] = image_std
UpperCAmelCase : List[Any] = class_info_file
UpperCAmelCase : Any = prepare_metadata(__A, __A )
UpperCAmelCase : Dict = num_text
UpperCAmelCase : int = repo_path
# for the post_process_functions
UpperCAmelCase : Dict = 2
UpperCAmelCase : List[Any] = 1_0
UpperCAmelCase : Optional[int] = 1_0
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : int = num_labels
UpperCAmelCase : Optional[Any] = do_reduce_labels
UpperCAmelCase : Union[str, Any] = ignore_index
def __magic_name__ ( self : List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __magic_name__ ( self : int, __A : Optional[Any], __A : str=False ):
if not batched:
UpperCAmelCase : int = image_inputs[0]
if isinstance(__A, Image.Image ):
UpperCAmelCase , UpperCAmelCase : Tuple = image.size
else:
UpperCAmelCase , UpperCAmelCase : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase : Tuple = int(self.size['''shortest_edge'''] * h / w )
UpperCAmelCase : int = self.size['''shortest_edge''']
elif w > h:
UpperCAmelCase : Optional[int] = self.size['''shortest_edge''']
UpperCAmelCase : Union[str, Any] = int(self.size['''shortest_edge'''] * w / h )
else:
UpperCAmelCase : List[str] = self.size['''shortest_edge''']
UpperCAmelCase : Any = self.size['''shortest_edge''']
else:
UpperCAmelCase : Tuple = []
for image in image_inputs:
UpperCAmelCase , UpperCAmelCase : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase : Dict = max(__A, key=lambda __A : item[0] )[0]
UpperCAmelCase : Tuple = max(__A, key=lambda __A : item[1] )[1]
return expected_height, expected_width
def __magic_name__ ( self : str ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCamelCase = image_processing_class
def __magic_name__ ( self : Dict ):
UpperCAmelCase : List[Any] = OneFormerImageProcessorTester(self )
@property
def __magic_name__ ( self : Any ):
return self.image_processing_tester.prepare_image_processor_dict()
def __magic_name__ ( self : str ):
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A, '''image_mean''' ) )
self.assertTrue(hasattr(__A, '''image_std''' ) )
self.assertTrue(hasattr(__A, '''do_normalize''' ) )
self.assertTrue(hasattr(__A, '''do_resize''' ) )
self.assertTrue(hasattr(__A, '''size''' ) )
self.assertTrue(hasattr(__A, '''ignore_index''' ) )
self.assertTrue(hasattr(__A, '''class_info_file''' ) )
self.assertTrue(hasattr(__A, '''num_text''' ) )
self.assertTrue(hasattr(__A, '''repo_path''' ) )
self.assertTrue(hasattr(__A, '''metadata''' ) )
self.assertTrue(hasattr(__A, '''do_reduce_labels''' ) )
def __magic_name__ ( self : List[Any] ):
pass
def __magic_name__ ( self : Optional[Any] ):
# Initialize image_processor
UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester, equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A, Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase : int = self.image_processing_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCAmelCase , UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(__A, batched=__A )
UpperCAmelCase : List[Any] = image_processor(
__A, ['''semantic'''] * len(__A ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def __magic_name__ ( self : List[str] ):
# Initialize image_processor
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__A, numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A, np.ndarray )
# Test not batched input
UpperCAmelCase : List[str] = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCAmelCase , UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(__A, batched=__A )
UpperCAmelCase : str = image_processor(
__A, ['''semantic'''] * len(__A ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def __magic_name__ ( self : Any ):
# Initialize image_processor
UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester, equal_resolution=__A, torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A, torch.Tensor )
# Test not batched input
UpperCAmelCase : Dict = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCAmelCase , UpperCAmelCase : int = self.image_processing_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCAmelCase , UpperCAmelCase : Dict = self.image_processing_tester.get_expected_values(__A, batched=__A )
UpperCAmelCase : Dict = image_processor(
__A, ['''semantic'''] * len(__A ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def __magic_name__ ( self : Any, __A : str=False, __A : Union[str, Any]=False, __A : Union[str, Any]="np" ):
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase : Tuple = self.image_processing_tester.num_labels
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Any = None
UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester, equal_resolution=__A )
if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation ) for annotation in annotations]
UpperCAmelCase : Tuple = image_processor(
__A, ['''semantic'''] * len(__A ), __A, return_tensors='''pt''', instance_id_to_semantic_id=__A, pad_and_return_pixel_mask=__A, )
return inputs
def __magic_name__ ( self : Tuple ):
pass
def __magic_name__ ( self : Optional[Any] ):
def common(__A : Any=False, __A : int=None ):
UpperCAmelCase : List[str] = self.comm_get_image_processor_inputs(
with_segmentation_maps=__A, is_instance_map=__A, segmentation_type=__A )
UpperCAmelCase : List[Any] = inputs['''mask_labels''']
UpperCAmelCase : Tuple = inputs['''class_labels''']
UpperCAmelCase : Any = inputs['''pixel_values''']
UpperCAmelCase : Optional[Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__A, __A, __A ):
self.assertEqual(mask_label.shape[0], class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
self.assertEqual(len(__A ), self.image_processing_tester.num_text )
common()
common(is_instance_map=__A )
common(is_instance_map=__A, segmentation_type='''pil''' )
common(is_instance_map=__A, segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
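        # Hedged reading of the helper (COCO-style uncompressed RLE): the mask is
        # flattened row-major and the list alternates (1-indexed start, run length)
        # pairs for the runs of ones, i.e. [21, 45, 251, 10] for the mask above.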
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 99 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The exact value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(iterations: int, function_to_integrate: Callable[[float], float], min_value: float = 0.0, max_value: float = 1.0, ) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
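    # Hedged demo (not part of the original module): each estimator should land
    # close to the analytic value as the sample count grows.
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)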
| 99 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
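# Hedged illustration of the slicing shared by both readers above (the helper name
# is hypothetical and not part of the original script): a fused (3*dim, dim)
# in_proj matrix splits into equal query/key/value blocks along its first axis.
def _split_fused_qkv_sketch(in_proj_weight, in_proj_bias, dim):
    query = (in_proj_weight[:dim, :], in_proj_bias[:dim])
    key = (in_proj_weight[dim : dim * 2, :], in_proj_bias[dim : dim * 2])
    value = (in_proj_weight[-dim:, :], in_proj_bias[-dim:])
    return query, key, value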
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
# load original state dict
if model_name == "deta-swin-large":
__UpperCamelCase = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f'Model name {model_name} not supported' )
__UpperCamelCase = torch.load(snake_case , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(snake_case , param.shape )
# rename keys
__UpperCamelCase = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_swin_q_k_v(snake_case , config.backbone_config )
read_in_decoder_q_k_v(snake_case , snake_case )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__UpperCamelCase = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] )
__UpperCamelCase = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] )
__UpperCamelCase = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 316 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False, ) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del vae_decoder
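    # Note (hedged): the (1, latent_channels, 25, 25) tensor above is only a tracing
    # shape; the exported graph keeps batch/height/width dynamic, and the Stable
    # Diffusion VAE decoder typically upsamples latents 8x (25x25 -> 200x200 pixels).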
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 316 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Dict = logging.get_logger(__name__)
lowercase : Any = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
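# Hedged usage sketch (assuming the class above mirrors transformers' MarkupLMConfig):
#
#     from transformers import MarkupLMConfig, MarkupLMModel
#     config = MarkupLMConfig()      # defaults correspond to microsoft/markuplm-base
#     model = MarkupLMModel(config)  # randomly initialised weights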
| 357 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
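        # each merge rule joins the two space-separated symbols on a line, e.g.
        # "\u0120 l" -> "\u0120l"; "\u0120" (rendered as "Ġ") marks a leading space
        # in GPT-2-style byte-level BPE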
snake_case_ : Optional[int] = {"unk_token": "[UNK]"}
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
snake_case_ : int = "lower newer"
snake_case_ : Dict = "lower newer"
return input_text, output_text
def _lowerCAmelCase ( self ) -> str:
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : str = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Dict = tokens + [tokenizer.unk_token]
snake_case_ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self ) -> Optional[Any]:
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : str = tokenizer("Hello" , "World" )
snake_case_ : List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"] , _SCREAMING_SNAKE_CASE )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 36 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
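# Hedged usage sketch (invocation per the transformers agents tool API; the image
# file name is hypothetical):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     print(tool(image=Image.open("cat.png"), question="What animal is shown?"))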
| 53 |
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of ``plain_text``."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
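if __name__ == "__main__":
    # Hedged sanity check: the commonly cited Adler-32 reference value for the
    # string "Wikipedia" is 0x11E60398 == 300_286_872.
    assert adler32("Wikipedia") == 300_286_872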
| 219 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase: Dict = logging.get_logger(__name__)
UpperCAmelCase: Dict = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
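        # Worked example (hedged, following how AST carves a spectrogram into patches):
        # with the defaults above the patch grid is roughly
        # ((max_length - patch_size) // time_stride + 1) x ((num_mel_bins - patch_size) // frequency_stride + 1)
        # = 101 x 12 patches.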
| 336 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_tests):
    _id = f"""{file}_{class_name}_{test_name}"""
    done_tests[_id] += 1
    with open(file, """r""") as f:
        lines = f.readlines()
    class_regex = f"""class {class_name}("""
    func_regex = f"""{4 * " "}def {test_name}("""
    line_begin_regex = f"""{8 * " "}{correct_line.split()[0]}"""
    another_line_begin_regex = f"""{16 * " "}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(func_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_tests[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"""{spaces * " "}{correct_line}""")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, """w""") as f:
        for line in new_lines:
            f.write(line)
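# Hedged note on the expected input: each line of the "correct" file carries four
# ';'-separated fields, e.g. (hypothetical)
#   tests/test_modeling_bert.py;BertModelTest;test_forward;self.assertEqual(out.shape, (1, 8, 32))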
def main(correct, fail=None):
    if fail is not None:
        with open(fail, """r""") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, """r""") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 336 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_:List[str] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs, ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
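    # e.g. a 193x300 image with size_divisor=32 comes out 192x288 (each side is
    # floored to a multiple of 32: 193 // 32 * 32 == 192, 300 // 32 * 32 == 288)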
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("""Invalid image(s)""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 116 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
A : List[str] = self.scheduler_classes[0]
A : List[str] = self.get_scheduler_config()
A : Dict = scheduler_class(**lowerCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420, 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980, 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487, 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999, 998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
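        # the three stacked trajectories are flattened into one (3 * batch) batch so a
        # single model forward and one batched scheduler call cover all of them at once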
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="""v_prediction""")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 116 | 1 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase_ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = """esm"""

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : str =None
a_ : bool =True
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : float =0
a_ : bool =True
a_ : bool =False
a_ : int =128
a_ : "TrunkConfig" =None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =48
a_ : int =1024
a_ : int =128
a_ : int =32
a_ : int =32
a_ : int =32
a_ : float =0
a_ : float =0
a_ : bool =False
a_ : int =4
a_ : Optional[int] =128
a_ : "StructureModuleConfig" =None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
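        # e.g. with the defaults (sequence_state_dim=1024, sequence_head_width=32,
        # pairwise_state_dim=128, pairwise_head_width=32) this yields 32 sequence
        # heads and 4 pairwise heads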
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict(self):
        output = asdict(self)
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : int =384
a_ : int =128
a_ : int =16
a_ : int =128
a_ : int =12
a_ : int =4
a_ : int =8
a_ : float =0.1
a_ : int =8
a_ : int =1
a_ : int =2
a_ : int =7
a_ : int =10
a_ : float =1E-8
a_ : float =1E5
    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 371 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, mobilebert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 260 | 0 |
import numpy as np
def sigmoid( vector : np.ndarray ) -> np.ndarray:
    """
    Apply the logistic sigmoid 1 / (1 + exp(-x)) element-wise.

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
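# Input stream that materializes a Dataset (or a streaming IterableDataset) from a Python generator.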
class GeneratorDatasetInputStream ( AbstractDatasetInputStream ):
    def __init__( self , generator : Callable , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , gen_kwargs : Optional[dict] = None , num_proc : Optional[int] = None , **kwargs , ):
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )

    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="""train""" )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split="""train""" , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 38 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
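# Distributed smoke test: split a dataset across torch.distributed ranks and
# verify that each rank sees its expected share of the examples.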
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3

class FailedTestError( RuntimeError ):
    pass

def gen( shards : List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main( )-> None:
    rank = int(os.environ["""RANK"""] )
    world_size = int(os.environ["""WORLD_SIZE"""] )
    parser = ArgumentParser()
    # note: argparse's `type=bool` treats any non-empty string as True
    parser.add_argument("""--streaming""" , type=bool )
    parser.add_argument("""--local_rank""" , type=int )
    parser.add_argument("""--num_workers""" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"""shards""": [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    # ranks below the remainder get one extra example
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 347 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
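# Tests for diffusers' IPNDMScheduler, a multistep scheduler that keeps a buffer
# of past residuals (`ets`) which must be re-seeded after `set_timesteps`.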
class IPNDMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {"""num_train_timesteps""": 1000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        # the save/load round-trip is exercised in check_over_configs / check_over_forward
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        # IPNDM runs two passes over the timesteps
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("""num_inference_steps""" , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , """set_timesteps""" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )

            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample

            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 254_0529 ) < 10
| 347 | 1 |