| code (string) | code_codestyle (int64) | style_context (string) | style_context_codestyle (int64) | label (int64) |
|---|---|---|---|---|
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
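# Illustrative check (assumes a local Spark session and the joblibspark backend are available):
#     with parallel_backend("spark"):
#         assert map_nested(add_one, {"a": [1, 2]}, num_proc=2) == {"a": [2, 3]}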
| 93 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda v: version.Version(v))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file: str):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file: str):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename: str):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name: Optional[str], module_path: str):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    # There has to be exactly one class inheriting from `DiffusionPipeline` in the loaded module.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve the GitHub version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision != "main":
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
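# Illustrative call ("some-user/some-pipeline" is a hypothetical repo id):
#     pipeline_cls = get_class_from_dynamic_module("some-user/some-pipeline", "pipeline.py")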
| 93 | 1 |
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        return np.array(target_ids)

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
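# Illustrative usage (the checkpoint name is just an example; any masked-LM checkpoint works):
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
#     unmasker("Paris is the [MASK] of France.", top_k=1)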
| 358 |
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """
    Sum the values of all nodes in a binary tree via depth-first search, e.g.:

    >>> tree = Node(10)
    >>> sum(BinaryTreeNodeSum(tree))
    10
    >>> tree.left = Node(5)
    >>> sum(BinaryTreeNodeSum(tree))
    15
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 246 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["vqvae"]
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__, mel=lowerCAmelCase__, vqvae=lowerCAmelCase__)
def a_ ( self) -> int:
return 50 if isinstance(self.scheduler, lowerCAmelCase__) else 1000
@torch.no_grad()
def __call__( self, lowerCAmelCase__ = 1, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = 0, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=True, ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
snake_case_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__)
snake_case_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
snake_case_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
), generator=lowerCAmelCase__, device=self.device, )
snake_case_ = noise
snake_case_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.mel.audio_slice_to_image(lowerCAmelCase__)
snake_case_ = np.frombuffer(input_image.tobytes(), dtype='uint8').reshape(
(input_image.height, input_image.width))
snake_case_ = (input_image / 255) * 2 - 1
snake_case_ = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
if self.vqvae is not None:
snake_case_ = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__, 0)).latent_dist.sample(
generator=lowerCAmelCase__)[0]
snake_case_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case_ = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, self.scheduler.timesteps[start_step - 1])
snake_case_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case_ = int(mask_start_secs * pixels_per_second)
snake_case_ = int(mask_end_secs * pixels_per_second)
snake_case_ = self.scheduler.add_noise(lowerCAmelCase__, lowerCAmelCase__, torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet, lowerCAmelCase__):
snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)['sample']
else:
snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__)['sample']
if isinstance(self.scheduler, lowerCAmelCase__):
snake_case_ = self.scheduler.step(
model_output=lowerCAmelCase__, timestep=lowerCAmelCase__, sample=lowerCAmelCase__, eta=lowerCAmelCase__, generator=lowerCAmelCase__, )['prev_sample']
else:
snake_case_ = self.scheduler.step(
model_output=lowerCAmelCase__, timestep=lowerCAmelCase__, sample=lowerCAmelCase__, generator=lowerCAmelCase__, )['prev_sample']
if mask is not None:
if mask_start > 0:
snake_case_ = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case_ = 1 / self.vqvae.config.scaling_factor * images
snake_case_ = self.vqvae.decode(lowerCAmelCase__)['sample']
snake_case_ = (images / 2 + 0.5).clamp(0, 1)
snake_case_ = images.cpu().permute(0, 2, 3, 1).numpy()
snake_case_ = (images * 255).round().astype('uint8')
snake_case_ = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__, mode='RGB').convert('L') for _ in images))
snake_case_ = [self.mel.image_to_audio(lowerCAmelCase__) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__)[:, np.newaxis, :]), **ImagePipelineOutput(lowerCAmelCase__))
@torch.no_grad()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = 50) -> np.ndarray:
assert isinstance(self.scheduler, lowerCAmelCase__)
self.scheduler.set_timesteps(lowerCAmelCase__)
snake_case_ = np.array(
[np.frombuffer(image.tobytes(), dtype='uint8').reshape((1, image.height, image.width)) for image in images])
snake_case_ = (sample / 255) * 2 - 1
snake_case_ = torch.Tensor(lowerCAmelCase__).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
snake_case_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case_ = self.scheduler.alphas_cumprod[t]
snake_case_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case_ = 1 - alpha_prod_t
snake_case_ = self.unet(lowerCAmelCase__, lowerCAmelCase__)['sample']
snake_case_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> torch.Tensor:
snake_case_ = acos(torch.dot(torch.flatten(lowerCAmelCase__), torch.flatten(lowerCAmelCase__)) / torch.norm(lowerCAmelCase__) / torch.norm(lowerCAmelCase__))
return sin((1 - alpha) * theta) * xa / sin(lowerCAmelCase__) + sin(alpha * theta) * xa / sin(lowerCAmelCase__)
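# Illustrative usage (the checkpoint name is an example from the audio-diffusion model family):
#     pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1)
#     image, audio = output.images[0], output.audios[0]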
| 69 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
 | 225 | 0 |
import numpy as np


class Cell:
    """
    A cell in the world, with:
    position: tuple of x and y coordinates, initially (0, 0).
    parent: the parent cell visited before we arrived at this cell.
    g, h, f: parameters used by the heuristic function.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """
    The external world: a grid stored as a numpy array of the given world_size (default 5x5).
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """
        Return the in-bounds neighbours of cell (8-connected).
        """
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """
    A* search from start to goal in world.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
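# With the defaults above, the printed path runs along the main diagonal from (0, 0) to (4, 4):
# diagonal moves cost the same as straight ones and h is the squared euclidean distance to the goal.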
| 367 |
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 192 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
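# Illustrative instantiation (the override value is arbitrary):
#     config = TableTransformerConfig(num_queries=50)
#     assert config.hidden_size == config.d_model == 256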
| 4 |
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 4 | 1 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """
    Count the reversible numbers of the given length, iterating over possible
    digits while tracking the parity of the running carry (remainder).
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
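# Sanity check: for the Project Euler 145 limit of max_power = 9, solution() returns 608720.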
| 152 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 152 | 1 |
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 68 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
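# In diffusers this module is plugged into StableDiffusionPipeline as `safety_checker`; `forward`
# returns the input images together with a per-image `has_nsfw_concepts` boolean list.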
| 68 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase ( metaclass=DummyObject ):
_backends = ["""sentencepiece"""]
def __init__( self , *args , **kwargs ):
requires_backends(self , ['sentencepiece'] )
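# Note on the block above: each of these repeated classes is a uniform placeholder
# for a different sentencepiece-backed tokenizer; instantiating one without the
# backend installed raises an ImportError from requires_backends, roughly:
# lowerCAmelCase() # ImportError: ... requires the SentencePiece library ...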
| 45 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
def __init__( self : List[str] , model : Optional[Any] ) -> None:
super().__init__()
self.model = model
self.num_labels = 2
self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
def forward( self : Optional[Any] ) -> Optional[Any]:
# implemented by the original training script; not needed for conversion
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) -> None:
# load longformer model from model identifier
model = LongformerModel.from_pretrained(longformer_model )
lightning_model = LightningModel(model )
ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
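# Example invocation (script name and paths are hypothetical):
# python convert_longformer_original_pytorch_lightning_to_pytorch.py \
# --longformer_model longformer-base-4096 \
# --longformer_question_answering_ckpt_path ./checkpoints/epoch=0.ckpt \
# --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa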
| 45 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
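# Usage note (sketch): downstream code imports these re-exports from the package
# namespace rather than from the private submodules, e.g.
# from accelerate.utils import set_seed, send_to_device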
| 184 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class _lowercase ( PretrainedConfig):
"""simple docstring"""
model_type = "xlm-roberta"
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class _lowercase ( OnnxConfig):
"""simple docstring"""
@property
def inputs( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
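# Sketch of the property above: for the default task the export declares
# batch and sequence as dynamic ONNX axes, i.e. it returns
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})])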
| 184 | 1 |
def combination_sum_iv( n , array , target ) -> int:
'''simple docstring'''
def count_of_possible_combinations(target ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(target )
def combination_sum_iv_with_dp_array( n , array , target ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
target , dp_array ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
answer = sum(
count_of_possible_combinations_with_dp_array(target - item, dp_array )
for item in array )
dp_array[target] = answer
return answer
dp_array = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(target, dp_array )
def combination_sum_iv_bottom_up( n , array , target ) -> int:
'''simple docstring'''
dp_array = [0] * (target + 1)
dp_array[0] = 1
for i in range(1, target + 1 ):
for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
n = 3
target = 5
array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
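# Worked check for the setup above: with array = [1, 2, 5] and target = 5 the
# recurrence dp[i] = sum(dp[i - a] for a in array if a <= i) gives
# dp = [1, 1, 2, 3, 5, 9], so all three implementations return 9
# (order matters: 1+1+1+2 and 2+1+1+1 count as distinct combinations).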
| 358 | from __future__ import annotations
def lowerCAmelCase_ ( electron_conc , hole_conc , intrinsic_conc , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
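# Worked example (illustrative values): with electron_conc = 3e10 and
# intrinsic_conc = 1.5e10, the mass-action law n * p = n_i**2 gives
# hole_conc = (1.5e10) ** 2 / 3e10 = 7.5e9, i.e. passing hole_conc=0 returns
# ("hole_conc", 7.5e9).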
| 143 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
model_input_names = ['''pixel_values''']
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , do_rescale = True , rescale_factor = 1 / 2_5_5 , crop_size = None , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
size = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
size = get_size_dict(size )
crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
self.do_resize = do_resize
self.do_rescale = do_rescale
self.do_normalize = do_normalize
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.size = size
self.resample = resample
self.rescale_factor = rescale_factor
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
"""simple docstring"""
size = get_size_dict(size )
if "shortest_edge" in size:
size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
size = (size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image , size , data_format = None , **kwargs , ):
"""simple docstring"""
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs ):
"""simple docstring"""
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image , mean , std , data_format = None , **kwargs , ):
"""simple docstring"""
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
"""simple docstring"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
resample = resample if resample is not None else self.resample
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size = get_size_dict(size )
if not is_batched(images ):
images = [images]
if not valid_images(images ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {'pixel_values': images}
return BatchFeature(data=data , tensor_type=return_tensors )
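# Usage sketch (assumes Pillow is installed; the file name is illustrative):
# from PIL import Image
# processor = UpperCAmelCase()
# batch = processor(Image.open('cat.png').convert('RGB') , return_tensors='np')
# batch['pixel_values'].shape # (1, 3, 224, 224) after resize, crop, rescale, normalize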
| 140 | import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"""sample_size""": 32,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": 1000,
"""block_out_channels""": [32, 64],
"""attention_head_dim""": 8,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
IMAGENET_64_UNET_CONFIG = {
"""sample_size""": 64,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 3,
"""num_class_embeds""": 1000,
"""block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """scale_shift""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
LSUN_256_UNET_CONFIG = {
"""sample_size""": 256,
"""in_channels""": 3,
"""out_channels""": 3,
"""layers_per_block""": 2,
"""num_class_embeds""": None,
"""block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"""attention_head_dim""": 64,
"""down_block_types""": [
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""ResnetDownsampleBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
"""AttnDownBlock2D""",
],
"""up_block_types""": [
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""AttnUpBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
"""ResnetUpsampleBlock2D""",
],
"""resnet_time_scale_shift""": """default""",
"""upsample_type""": """resnet""",
"""downsample_type""": """resnet""",
}
CD_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 40,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"""num_train_timesteps""": 151,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
def strabool ( __lowercase : Any ):
'''simple docstring'''
if isinstance(__lowercase , bool ):
return __lowercase
if __lowercase.lower() in ("yes", "true", "t", "y", "1"):
return True
elif __lowercase.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet ( checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=False ):
'''simple docstring'''
new_checkpoint[f'''{new_prefix}.norm1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
new_checkpoint[f'''{new_prefix}.norm1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
new_checkpoint[f'''{new_prefix}.conv1.weight'''] = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
new_checkpoint[f'''{new_prefix}.conv1.bias'''] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
new_checkpoint[f'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
new_checkpoint[f'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
new_checkpoint[f'''{new_prefix}.norm2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
new_checkpoint[f'''{new_prefix}.norm2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
new_checkpoint[f'''{new_prefix}.conv2.weight'''] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
new_checkpoint[f'''{new_prefix}.conv2.bias'''] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
new_checkpoint[f'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
new_checkpoint[f'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def convert_attention ( checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim=None ):
'''simple docstring'''
weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 ,dim=0 )
bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 ,dim=0 )
new_checkpoint[f'''{new_prefix}.group_norm.weight'''] = checkpoint[f'''{old_prefix}.norm.weight''']
new_checkpoint[f'''{new_prefix}.group_norm.bias'''] = checkpoint[f'''{old_prefix}.norm.bias''']
new_checkpoint[f'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
new_checkpoint[f'''{new_prefix}.to_out.0.weight'''] = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
new_checkpoint[f'''{new_prefix}.to_out.0.bias'''] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser ( checkpoint_path : str ,unet_config : Union[str, Any] ):
'''simple docstring'''
checkpoint = torch.load(checkpoint_path ,map_location='cpu' )
new_checkpoint = {}
new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
down_block_types = unet_config['down_block_types']
layers_per_block = unet_config['layers_per_block']
attention_head_dim = unet_config['attention_head_dim']
channels_list = unet_config['block_out_channels']
current_layer = 1
prev_channels = channels_list[0]
for i, layer_type in enumerate(down_block_types ):
current_channels = channels_list[i]
downsample_block_has_skip = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(layers_per_block ):
new_prefix = f'''down_blocks.{i}.resnets.{j}'''
old_prefix = f'''input_blocks.{current_layer}.0'''
has_skip = True if j == 0 and downsample_block_has_skip else False
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=has_skip )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(layers_per_block ):
new_prefix = f'''down_blocks.{i}.resnets.{j}'''
old_prefix = f'''input_blocks.{current_layer}.0'''
has_skip = True if j == 0 and downsample_block_has_skip else False
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=has_skip )
new_prefix = f'''down_blocks.{i}.attentions.{j}'''
old_prefix = f'''input_blocks.{current_layer}.1'''
new_checkpoint = convert_attention(
checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
current_layer += 1
if i != len(down_block_types ) - 1:
new_prefix = f'''down_blocks.{i}.downsamplers.0'''
old_prefix = f'''input_blocks.{current_layer}.0'''
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
current_layer += 1
prev_channels = current_channels
# hardcoded the mid-block for now
new_prefix = 'mid_block.resnets.0'
old_prefix = 'middle_block.0'
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
new_prefix = 'mid_block.attentions.0'
old_prefix = 'middle_block.1'
new_checkpoint = convert_attention(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
new_prefix = 'mid_block.resnets.1'
old_prefix = 'middle_block.2'
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
current_layer = 0
up_block_types = unet_config['up_block_types']
for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
new_prefix = f'''up_blocks.{i}.resnets.{j}'''
old_prefix = f'''output_blocks.{current_layer}.0'''
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=True )
current_layer += 1
if i != len(up_block_types ) - 1:
new_prefix = f'''up_blocks.{i}.upsamplers.0'''
old_prefix = f'''output_blocks.{current_layer-1}.1'''
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
new_prefix = f'''up_blocks.{i}.resnets.{j}'''
old_prefix = f'''output_blocks.{current_layer}.0'''
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,has_skip=True )
new_prefix = f'''up_blocks.{i}.attentions.{j}'''
old_prefix = f'''output_blocks.{current_layer}.1'''
new_checkpoint = convert_attention(
checkpoint ,new_checkpoint ,old_prefix ,new_prefix ,attention_head_dim )
current_layer += 1
if i != len(up_block_types ) - 1:
new_prefix = f'''up_blocks.{i}.upsamplers.0'''
old_prefix = f'''output_blocks.{current_layer-1}.2'''
new_checkpoint = convert_resnet(checkpoint ,new_checkpoint ,old_prefix ,new_prefix )
new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""")
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model."""
)
parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""")
args = parser.parse_args()
args.class_cond = strabool(args.class_cond)
ckpt_name = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
unet_config = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
unet_config["num_class_embeds"] = None
converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
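# Example invocation (checkpoint name is hypothetical; the U-Net and scheduler
# configs are selected from the file name as shown above):
# python convert_consistency_to_diffusers.py --unet_path ./cd_imagenet64_l2.pt \
# --dump_path ./consistency-model-imagenet64 --class_cond True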
| 140 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case_ : str = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __snake_case ( Pipeline ):
def __init__( self : str , *args : List[str] , **kwargs : Any):
"""simple docstring"""
super().__init__(*args , **kwargs)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , '''vision''')
self.check_model_type(
dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items()) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())))
def _sanitize_parameters ( self : int , **kwargs : Tuple):
"""simple docstring"""
postprocess_kwargs = {}
if "threshold" in kwargs:
postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : Optional[int] , *args : List[Any] , **kwargs : str):
"""simple docstring"""
return super().__call__(*args , **kwargs)
def preprocess ( self : Dict , image : Dict):
"""simple docstring"""
image = load_image(image)
target_size = torch.IntTensor([[image.height, image.width]])
inputs = self.image_processor(images=[image] , return_tensors='''pt''')
if self.tokenizer is not None:
inputs = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''')
inputs['''target_size'''] = target_size
return inputs
def _forward ( self : int , model_inputs : Tuple):
"""simple docstring"""
target_size = model_inputs.pop('''target_size''')
outputs = self.model(**model_inputs)
model_outputs = outputs.__class__({'''target_size''': target_size, **outputs})
if self.tokenizer is not None:
model_outputs['''bbox'''] = model_inputs['''bbox''']
return model_outputs
def postprocess ( self : str , model_outputs : Any , threshold : List[str]=0.9):
"""simple docstring"""
target_size = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
height , width = target_size[0].tolist()
def unnormalize(bbox : Optional[int]):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
]))
scores , classes = model_outputs['''logits'''].squeeze(0).softmax(dim=-1).max(dim=-1)
labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
boxes = [unnormalize(bbox) for bbox in model_outputs['''bbox'''].squeeze(0)]
keys = ['''score''', '''label''', '''box''']
annotation = [dict(zip(keys , vals)) for vals in zip(scores.tolist() , labels , boxes) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size)
raw_annotation = raw_annotations[0]
scores = raw_annotation['''scores''']
labels = raw_annotation['''labels''']
boxes = raw_annotation['''boxes''']
raw_annotation['''scores'''] = scores.tolist()
raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
raw_annotation['''boxes'''] = [self._get_bounding_box(box) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
keys = ['''score''', '''label''', '''box''']
annotation = [
dict(zip(keys , vals))
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''])
]
return annotation
def _get_bounding_box ( self : Union[str, Any] , box : "torch.Tensor"):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''')
xmin , ymin , xmax , ymax = box.int().tolist()
bbox = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
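# Usage sketch (model identifier and output values are illustrative):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("cat.png", threshold=0.9)
# # -> [{"score": 0.99, "label": "cat", "box": {"xmin": 10, "ymin": 20, "xmax": 350, "ymax": 420}}]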
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf (model : BertModel , ckpt_dir : str , model_name : str ) -> None:
"""simple docstring"""
tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
var_map = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(ckpt_dir ):
os.makedirs(ckpt_dir )
state_dict = model.state_dict()
def to_tf_var_name(name : str ):
for patt, repl in iter(var_map ):
name = name.replace(patt , repl )
return F"""bert/{name}"""
def create_tf_var(tensor : np.ndarray , name : str , session : tf.Session ):
tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(tf_var )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
tf_name = to_tf_var_name(var_name )
torch_tensor = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
torch_tensor = torch_tensor.T
tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
tf.keras.backend.set_value(tf_var , torch_tensor )
tf_weight = session.run(tf_var )
print(F"""Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}""" )
saver = tf.train.Saver(tf.trainable_variables() )
saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main (raw_args : Any=None ) -> None:
"""simple docstring"""
parser = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
args = parser.parse_args(raw_args )
model = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
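# Example invocation (paths are hypothetical):
# python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
# --pytorch_model_path ./bert-base-uncased/pytorch_model.bin --tf_cache_dir ./tf_ckpt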
| 7 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively ( hf_pointer :Any , key :Union[str, Any] , value :List[str] , full_name :Any , weight_type :Tuple ):
for attribute in key.split('''.''' ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
# every branch of the original if/elif chain assigned `value` to the parameter
# named by `weight_type`, so the chain collapses to a single getattr assignment
if weight_type is None:
hf_pointer.data = value
else:
getattr(hf_pointer , weight_type ).data = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore ( name :Tuple , ignore_keys :str ):
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights ( orig_dict :Optional[Any] , hf_model :int , model_name :Dict ):
unused_weights = []
if model_name in ("encodec_24khz", "encodec_32khz"):
MAPPING = MAPPING_24K
elif model_name == "encodec_48khz":
MAPPING = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(name , IGNORE_KEYS ):
logger.info(F'''{name} was ignored''' )
continue
is_used = False
for key, mapped_key in MAPPING.items():
if "*" in key:
prefix , suffix = key.split('''.*.''' )
if prefix in name and suffix in name:
key = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split('''.''' )[-2]
mapped_key = mapped_key.replace('''*''' , layer_index )
# the original if/elif chain picked the first known parameter suffix
# contained in `name`; the ordered scan below is equivalent
weight_types = [
'''weight_g''', '''weight_v''',
'''weight_ih_l0''', '''weight_hh_l0''', '''bias_ih_l0''', '''bias_hh_l0''',
'''weight_ih_l1''', '''weight_hh_l1''', '''bias_ih_l1''', '''bias_hh_l1''',
'''bias''', '''weight''', '''running_mean''', '''running_var''', '''num_batches_tracked''',
]
weight_type = next((wt for wt in weight_types if wt in name ) , None )
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def convert_checkpoint ( model_name :Tuple , checkpoint_path :List[str] , pytorch_dump_folder_path :Optional[int] , config_path :Tuple=None , repo_id :int=None , ):
if config_path is not None:
config = EncodecConfig.from_pretrained(config_path )
else:
config = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
config.upsampling_ratios = [8, 5, 4, 4]
config.target_bandwidths = [2.2]
config.num_filters = 64
config.sampling_rate = 32_000
config.codebook_size = 2_048
config.use_causal_conv = False
config.normalize = False
config.use_conv_shortcut = False
elif model_name == "encodec_48khz":
config.upsampling_ratios = [8, 5, 4, 2]
config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
config.sampling_rate = 48_000
config.audio_channels = 2
config.use_causal_conv = False
config.norm_type = '''time_group_norm'''
config.normalize = True
config.chunk_length_s = 1.0
config.overlap = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
model = EncodecModel(config )
feature_extractor = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(pytorch_dump_folder_path )
original_checkpoint = torch.load(checkpoint_path )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
original_checkpoint = original_checkpoint['''best_state''']
recursively_load_weights(original_checkpoint , model , model_name )
model.save_pretrained(pytorch_dump_folder_path )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(repo_id )
model.push_to_hub(repo_id )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
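# Example invocation (script name is illustrative; the checkpoint file matches
# the download URLs in the header comment):
# python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
# --checkpoint_path ./encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec_24khz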
| 332 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory ( ):
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest ( nn.Module ):
def __init__( self : Optional[Any] ):
super().__init__()
self.linear1 = nn.Linear(3 , 4 )
self.batchnorm = nn.BatchNorm1d(4 )
self.linear2 = nn.Linear(4 , 5 )
def forward ( self : Optional[int] , _lowercase : Optional[Any] ):
return self.linear2(self.batchnorm(self.linear1(_lowercase ) ) )
class _UpperCAmelCase ( unittest.TestCase ):
def test_base ( self : List[str] ):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(batch_size : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
def test_passed_args ( self : Optional[int] ):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(batch_size : str , arga : List[str] ):
nonlocal batch_sizes
batch_sizes.append(batch_size )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
bs , arga = mock_training_loop_function('''hello''' )
self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def test_start_zero ( self : Tuple ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_lowercase : Optional[int] ):
pass
with self.assertRaises(_lowercase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def test_approach_zero ( self : List[Any] ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(batch_size : List[Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_lowercase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def test_verbose_guard ( self : Union[str, Any] ):
@find_executable_batch_size(starting_batch_size=1_28 )
def mock_training_loop_function(batch_size : Optional[Any] , arg1 : List[str] , arg2 : str ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(_lowercase ) as cm:
mock_training_loop_function(1_28 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def test_any_other_error ( self : Dict ):
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_lowercase : int ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(_lowercase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def test_release_memory ( self : str ):
starting_memory = torch.cuda.memory_allocated()
model = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , starting_memory )
model = release_memory(model )
self.assertEqual(torch.cuda.memory_allocated() , starting_memory )
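# Sketch of the decorator contract exercised above: the wrapped function takes
# `batch_size` as its first argument; on a CUDA OOM error it is retried with the
# batch size halved until it succeeds or reaches zero, e.g.
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     ...
# train() # runs with the largest batch size that fits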
| 332 | 1 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class snake_case :
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Tuple:
'''simple docstring'''
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self : Union[str, Any] , a__ : Optional[Any] , a__ : str , a__ : List[Any] , a__ : Dict , a__ : Dict , a__ : Any , a__ : str , a__ : Any , a__ : Optional[int] , ) -> List[str]:
'''simple docstring'''
_A = True
_A = LlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
_A = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
_A = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
_A = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self : Tuple , a__ : int , a__ : int , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Any , a__ : Tuple , a__ : Union[str, Any] , a__ : int , a__ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
_A = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
_A = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self : List[Any] , a__ : Tuple , a__ : Optional[int] , a__ : List[str] , a__ : List[str] , a__ : Dict , a__ : Union[str, Any] , a__ : int , a__ : Optional[Any] , a__ : Optional[int] , ) -> int:
'''simple docstring'''
_A = True
_A = True
_A = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
_A = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
_A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3) , config.vocab_size )
_A = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_A = torch.cat([input_ids, next_tokens] , dim=-1 )
_A = torch.cat([input_mask, next_mask] , dim=-1 )
_A = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0]
_A = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0]
# select random slice
_A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A = output_from_no_past[:, -3:, random_slice_idx].detach()
_A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = 3
_A = input_dict["input_ids"]
_A = input_ids.ne(1 ).to(lowercase_ )
_A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_A = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = 3
_A = "single_label_classification"
_A = input_dict["input_ids"]
_A = input_ids.ne(1 ).to(lowercase_ )
_A = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_A = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_A = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def a_ ( self : str ) -> List[str]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = 3
_A = "multi_label_classification"
_A = input_dict["input_ids"]
_A = input_ids.ne(1 ).to(lowercase_ )
_A = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_A = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_A = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 1_0.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
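    # --- Illustrative sketch (editor addition) ---
    # The two strategies parametrized above are selected purely through the config, e.g.
    # (assuming a transformers version with RoPE-scaling support):
    #
    #     config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 10.0})
    #
    # "linear" scaling divides the position ids by `factor`; "dynamic" scaling only rescales
    # the RoPE base once the input grows past `max_position_embeddings`.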
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_A = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_A = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
_A = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_A = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_A = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_A = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
_A = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_A = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , lowercase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def a_ ( self : Any ) -> str:
'''simple docstring'''
_A = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_A = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
_A = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
_A = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_A = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1E-2 , rtol=1E-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor(
            [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
@unittest.skip("Model is curently gated" )
@slow
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_A = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
_A = "Simply put, the theory of relativity states that "
_A = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
_A = tokenizer.encode(lowercase_ , return_tensors="pt" )
_A = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=lowercase_ )
# greedy generation outputs
_A = model.generate(lowercase_ , max_new_tokens=64 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
_A = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ ) | 364 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
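# --- Illustrative sketch (editor addition, not part of the original script) ---
# How the (source, destination) pairs built above are consumed: each checkpoint tensor is
# popped under its mmsegmentation name and re-inserted under the Transformers name.
_demo_state = {"backbone.downsample_layers.0.0.weight": 0}
_demo_state["backbone.embeddings.patch_embeddings.weight"] = _demo_state.pop(
    "backbone.downsample_layers.0.0.weight"
)
assert "backbone.embeddings.patch_embeddings.weight" in _demo_state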
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""")
        model.push_to_hub(f"""openmmlab/{model_name}""")
        processor.push_to_hub(f"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16,
                 decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
                 attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
                 decoder_layerdrop=0.0, use_cache=True, scale_embedding=False,
                 use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
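# --- Illustrative sketch (editor addition) ---
# `attribute_map` above lets generic code read BERT-style attribute names: accessing
# `hidden_size` transparently resolves to `d_model`, and `num_hidden_layers` to
# `decoder_layers`.
_demo_config = TrOCRConfig(d_model=256, decoder_layers=6)
assert _demo_config.hidden_size == 256
assert _demo_config.num_hidden_layers == 6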
| 41 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
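# Editor note: PyTorch's nn.MultiheadAttention stores the query/key/value projections as one
# fused `in_proj_weight` of shape (3 * d_model, d_model); with d_model = 256 here, the three
# 256-row slices above are the q, k and v projections, in that order.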
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"""Converting model {model_name}...""")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase : Any = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_UpperCAmelCase : Optional[Any] = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_UpperCAmelCase : Tuple = state_dict.pop(_lowerCAmelCase )
_UpperCAmelCase : Any = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1E-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1E-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1E-4)

    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 246 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
                 position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''text_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0,
                 depth_coefficient: float = 3.1, depth_divisor: int = 8,
                 kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
                 in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
                 out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
                 depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
                 num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
                 expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25,
                 hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean",
                 initializer_range: float = 0.02, batch_norm_eps: float = 0.001,
                 batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('''model_type''') == "align":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640,
                 temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
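# --- Illustrative sketch (editor addition) ---
# Composing the full ALIGN config from its two sub-configs via the classmethod defined above.
_demo_config = AlignConfig.from_text_vision_configs(
    AlignTextConfig(vocab_size=1_000, hidden_size=128), AlignVisionConfig(image_size=224)
)
assert _demo_config.text_config.vocab_size == 1_000
assert _demo_config.vision_config.image_size == 224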
| 72 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True,
                 use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
                 intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02,
                 backbone_featmap_shape=[1, 16, 4, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
def A_ ( self : Any ):
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , )
def A_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : int ):
snake_case_ = ViTHybridModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : List[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[int] ):
snake_case_ = self.type_sequence_label_size
snake_case_ = ViTHybridForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
def A_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def A_ ( self : Any ):
pass
def A_ ( self : Dict ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def A_ ( self : Dict ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowercase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def A_ ( self : Tuple ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def A_ ( self : Optional[Any] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(lowercase_ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=lowercase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
snake_case_ = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def A_ ( self : Tuple ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTHybridModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def prepare_img():
'''simple docstring'''
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
@cached_property
def A_ ( self : Any ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A_ ( self : List[str] ):
snake_case_ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowercase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**lowercase_ )
# verify the logits
snake_case_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
snake_case_ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
@slow
@require_accelerate
def A_ ( self : Dict ):
snake_case_ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
snake_case_ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowercase_ , return_tensors='''pt''' )
snake_case_ = model(**lowercase_ )
snake_case_ = outputs.logits
# model predicts one of the 1000 ImageNet classes
snake_case_ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''')
| 72 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
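# Editor note: registering a `_LazyModule` here defers the actual import of
# `BartphoTokenizer` until the attribute is first accessed, so `import transformers` stays
# cheap when the optional sentencepiece dependency is not installed.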
| 57 |
from collections import deque
from .hash_table import HashTable
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *_snake_case : Union[str, Any] , **_snake_case : Union[str, Any] ):
super().__init__(*_snake_case , **_snake_case )
def snake_case_ ( self : List[Any] , _snake_case : List[Any] , _snake_case : Dict ):
__lowercase : Any = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(_snake_case )
__lowercase : List[Any] = self.values[key]
def snake_case_ ( self : Any ):
return (
sum(self.charge_factor - len(_snake_case ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def snake_case_ ( self : int , key : str , data : Optional[int]=None ):
    if not (
        len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
    ):
        return key
    return super()._collision_resolution(key , data )
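# Minimal sketch (not part of the original file) of the deque behaviour the class
# above relies on: appendleft keeps the most recent value first, so a full slot
# reads newest-to-oldest.
if __name__ == "__main__":
    _slot = deque([] )
    _slot.appendleft("first" )
    _slot.appendleft("second" )
    assert list(_slot ) == ["second", "first"]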
| 156 | 0 |
class TrieNode :
    def __init__( self :Union[str, Any] ):
        '''simple docstring'''
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self :List[str] , words :list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self :int , word :str ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self :Optional[int] , word :str ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self :Tuple , word :str ):
        '''simple docstring'''
        def _delete(curr :TrieNode , word :str , index :int ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self , word , 0 )
def print_words( node , word ):
    if node.is_leaf:
        print(word , end=" " )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie( ):
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("banana" )
    assert not root.find("bandanas" )
    assert not root.find("apps" )
    assert root.find("apple" )
    assert root.find("all" )
    root.delete("all" )
    assert not root.find("all" )
    root.delete("banana" )
    assert not root.find("banana" )
    assert root.find("bananas" )
    return True
def print_results( msg , passes ):
    print(str(msg ) , "works!" if passes else "doesn't work :(" )
def pytests( ):
    assert test_trie()
def main( ):
    print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
| 201 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self :Dict , num_train_timesteps :int = 10_00 , trained_betas :Optional[Union[np.ndarray, List[float]]] = None ):
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps( self :str , num_inference_steps :int , device :Union[str, torch.device] = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
        steps = torch.cat([steps, torch.tensor([0.0] )] )
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
        else:
            self.betas = torch.sin(steps * math.pi / 2 ) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device )
        self.ets = []
    def step( self :Optional[int] , model_output :torch.FloatTensor , timestep :int , sample :torch.FloatTensor , return_dict :bool = True , ):
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets )
        if len(self.ets ) == 1:
            ets = self.ets[-1]
        elif len(self.ets ) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets ) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def scale_model_input( self :Any , sample :torch.FloatTensor , *args :int , **kwargs :int ):
        '''simple docstring'''
        return sample
    def _get_prev_sample( self :str , sample :Tuple , timestep_index :int , prev_timestep_index :Optional[Any] , ets :List[str] ):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha , 1e-8 )
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self :Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
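# Illustrative denoising-loop sketch (not part of the original file): shows how the
# scheduler above is typically driven. The `unet` below is a hypothetical stand-in
# for any model mapping (sample, timestep) -> noise prediction; it is not an API
# this file provides.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=10_00 )
    scheduler.set_timesteps(10 )
    sample = torch.randn(1 , 3 , 8 , 8 ) * scheduler.init_noise_sigma
    unet = lambda x , t: torch.zeros_like(x )  # hypothetical denoiser stub
    for t in scheduler.timesteps:
        residual = unet(sample , t )
        sample = scheduler.step(residual , t , sample ).prev_sample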
| 201 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k ):
    """simple docstring"""
    return getitem, k
def _set(k , v ):
    """simple docstring"""
    return setitem, k, v
def _del(k ):
    """simple docstring"""
    return delitem, k
def _run_operation(obj , fun , *args ):
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations ):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods():
    """simple docstring"""
    def is_public(name ) -> bool:
        return not name.startswith("""_""" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 147 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id , path , revision ):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'
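# Illustrative expected value (not part of the original test): with
# repo_id="org-name/dataset-name", path="filename with blanks.csv" and revision=None,
# the helper yields
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
# because urllib.parse.quote percent-encodes the space in the filename.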
| 147 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size_divisor=32 , do_rescale=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self ):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self ):
        self.image_processor_tester = GLPNImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size_divisor""" ) )
        self.assertTrue(hasattr(image_processing , """resample""" ) )
        self.assertTrue(hasattr(image_processing , """do_rescale""" ) )
def UpperCAmelCase_ (self ):
pass
    def test_call_pil(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch(self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 178 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted( self ):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value( self ):
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value( self ):
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length( self ):
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 45 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args : Namespace ) -> Tuple:
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(f'''Loading model {model_type}''' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE )
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ''''''
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ''''''
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT , self._config , self._pytorch_dump_output , TF_DATASET_FILE )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 45 | 1 |
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    '''simple docstring'''
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
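# Illustrative invocation (not part of the original file); the flag names are derived
# from TensorFlowBenchmarkArguments via HfArgumentParser and should be treated as
# assumptions about that dataclass:
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128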
| 354 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs( self : Optional[Any] , seed : int=0 ):
"""simple docstring"""
        image = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_pipeline_default_ddpm( self : Optional[Any] ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self : List[Any] ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self : Optional[int] ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self : Any ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self : Any ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase):
    @property
    def gpu_provider( self : str ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options( self : Any ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self : Optional[int] ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((1_2_8, 1_2_8) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=1_0 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self : str ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((1_2_8, 1_2_8) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=2_0 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 175 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters( state_dict : str ) -> str:
    '''simple docstring'''
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict( state_dict : str , codebook_state_dict : List[str] ) -> str:
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
        key = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
        key = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
        key = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
        key = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
        key = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
        key = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
        key = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
        key = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
        key = key.replace("image_encoder.module" , "flava.image_model" )
        key = key.replace("text_encoder.module" , "flava.text_model" )
        key = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
        key = key.replace("mm_encoder.module" , "flava.multimodal_model" )
        key = key.replace("text_projection" , "flava.text_projection" )
        key = key.replace("image_projection" , "flava.image_projection" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint( checkpoint_path : Union[str, Any] , codebook_path : List[str] , pytorch_dump_folder_path : str , config_path : str=None ) -> Tuple:
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 1 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean ( input_a ,input_b ):
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(input_a ,input_b ) ) )
def similarity_search ( dataset ,value_array ):
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            """Wrong input data's dimensions... """
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                """Wrong input data's shape... """
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("""Wrong shape""" )
    if dataset.dtype != value_array.dtype:
        msg = (
            """Input data have different datatype... """
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value ,dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value ,dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity ( input_a ,input_b ):
    '''simple docstring'''
    return np.dot(input_a ,input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
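if __name__ == "__main__":
    # Illustrative worked example (not part of the original file).
    dataset = np.array([[0.0, 0.0], [1.0, 1.0]] )
    value_array = np.array([[0.0, 1.0]] )
    # (0, 1) is at distance 1.0 from both rows, so the first row wins the tie.
    print(similarity_search(dataset ,value_array ) )  # [[[0.0, 0.0], 1.0]]
    print(cosine_similarity(np.array([1.0, 0.0] ) ,np.array([0.0, 1.0] ) ) )  # 0.0 for orthogonal vectors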
| 349 | 0 |
import argparse
import struct
import unittest
class SHA256 :
    """simple docstring"""
    def __init__( self , data ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6_a_0_9_e_6_6_7,
0Xb_b_6_7_a_e_8_5,
0X3_c_6_e_f_3_7_2,
0Xa_5_4_f_f_5_3_a,
0X5_1_0_e_5_2_7_f,
0X9_b_0_5_6_8_8_c,
0X1_f_8_3_d_9_a_b,
0X5_b_e_0_c_d_1_9,
]
# Initialize round constants
        self.round_constants = [
0X4_2_8_a_2_f_9_8,
0X7_1_3_7_4_4_9_1,
0Xb_5_c_0_f_b_c_f,
0Xe_9_b_5_d_b_a_5,
0X3_9_5_6_c_2_5_b,
0X5_9_f_1_1_1_f_1,
0X9_2_3_f_8_2_a_4,
0Xa_b_1_c_5_e_d_5,
0Xd_8_0_7_a_a_9_8,
0X1_2_8_3_5_b_0_1,
0X2_4_3_1_8_5_b_e,
0X5_5_0_c_7_d_c_3,
0X7_2_b_e_5_d_7_4,
0X8_0_d_e_b_1_f_e,
0X9_b_d_c_0_6_a_7,
0Xc_1_9_b_f_1_7_4,
0Xe_4_9_b_6_9_c_1,
0Xe_f_b_e_4_7_8_6,
0X0_f_c_1_9_d_c_6,
0X2_4_0_c_a_1_c_c,
0X2_d_e_9_2_c_6_f,
0X4_a_7_4_8_4_a_a,
0X5_c_b_0_a_9_d_c,
0X7_6_f_9_8_8_d_a,
0X9_8_3_e_5_1_5_2,
0Xa_8_3_1_c_6_6_d,
0Xb_0_0_3_2_7_c_8,
0Xb_f_5_9_7_f_c_7,
0Xc_6_e_0_0_b_f_3,
0Xd_5_a_7_9_1_4_7,
0X0_6_c_a_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_b_7_0_a_8_5,
0X2_e_1_b_2_1_3_8,
0X4_d_2_c_6_d_f_c,
0X5_3_3_8_0_d_1_3,
0X6_5_0_a_7_3_5_4,
0X7_6_6_a_0_a_b_b,
0X8_1_c_2_c_9_2_e,
0X9_2_7_2_2_c_8_5,
0Xa_2_b_f_e_8_a_1,
0Xa_8_1_a_6_6_4_b,
0Xc_2_4_b_8_b_7_0,
0Xc_7_6_c_5_1_a_3,
0Xd_1_9_2_e_8_1_9,
0Xd_6_9_9_0_6_2_4,
0Xf_4_0_e_3_5_8_5,
0X1_0_6_a_a_0_7_0,
0X1_9_a_4_c_1_1_6,
0X1_e_3_7_6_c_0_8,
0X2_7_4_8_7_7_4_c,
0X3_4_b_0_b_c_b_5,
0X3_9_1_c_0_c_b_3,
0X4_e_d_8_a_a_4_a,
0X5_b_9_c_c_a_4_f,
0X6_8_2_e_6_f_f_3,
0X7_4_8_f_8_2_e_e,
0X7_8_a_5_6_3_6_f,
0X8_4_c_8_7_8_1_4,
0X8_c_c_7_0_2_0_8,
0X9_0_b_e_f_f_f_a,
0Xa_4_5_0_6_c_e_b,
0Xb_e_f_9_a_3_f_7,
0Xc_6_7_1_7_8_f_2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ):
        padding = b'''\x80''' + (b'''\x00''' * (6_3 - (len(data ) + 8) % 6_4))
        big_endian_integer = struct.pack('''>Q''' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 6_4]
            for x in range(0 , len(self.preprocessed_data ) , 6_4 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('''>16L''' , block ) )
            # add 48 0-ed integers
            words += [0] * 4_8
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 6_4 ):
                if index > 1_5:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 1_5] , 7 )
                        ^ self.ror(words[index - 1_5] , 1_8 )
                        ^ (words[index - 1_5] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 1_7 )
                        ^ self.ror(words[index - 2] , 1_9 )
                        ^ (words[index - 2] >> 1_0)
                    )
                    words[index] = (
                        words[index - 1_6] + s0 + words[index - 7] + s1
                    ) % 0X1_0_0_0_0_0_0_0_0
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 1_1 ) ^ self.ror(e , 2_5 )
                ch = (e & f) ^ ((~e & 0Xf_f_f_f_f_f_f_f) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X1_0_0_0_0_0_0_0_0
                s0 = self.ror(a , 2 ) ^ self.ror(a , 1_3 ) ^ self.ror(a , 2_2 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X1_0_0_0_0_0_0_0_0
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X1_0_0_0_0_0_0_0_0),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''''''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value , rotations ):
        return 0Xf_f_f_f_f_f_f_f & (value << (3_2 - rotations)) | (value >> rotations)
class SHA256HashTest ( unittest.TestCase ):
    """simple docstring"""
    def test_match_hashes( self ):
        import hashlib
        msg = bytes('''Test String''' , '''utf-8''' )
        self.assertEqual(SHA256(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-s''' ,'''--string''' ,dest='''input_string''' ,default='''Hello World!! Welcome to Cryptography''' ,help='''Hash the string''' ,)
    parser.add_argument(
        '''-f''' ,'''--file''' ,dest='''input_file''' ,help='''Hash contents of a file''' )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file ,'''rb''' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input ,'''utf-8''' )
    print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
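# Known-answer check (illustrative, not part of the original file): SHA-256 of the
# empty byte string is the standard test vector, so
# SHA256(b"").hash == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".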
| 355 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 213 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = Dict[str, Any]
lowercase_ = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
"""simple docstring"""
    def __init__( self : Optional[int] , *args : Union[str, Any] , **kwargs : Tuple )-> int:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
requires_backends(self,'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self : Union[str, Any] , **kwargs : int )-> Any:
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
return {}, {}, postprocess_kwargs
    def __call__( self : List[str] , *args : int , **kwargs : Union[str, Any] )-> Union[Predictions, List[Prediction]]:
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
    def preprocess( self : Tuple , image : Any )-> List[Any]:
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
return inputs
    def _forward( self : Dict , model_inputs : int )-> str:
        '''simple docstring'''
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
return model_outputs
    def postprocess( self : List[str] , model_outputs : Tuple , threshold : Union[str, Any]=0.9 )-> List[str]:
        '''simple docstring'''
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()
            def unnormalize(bbox : List[str] ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_0_0_0),
                            (height * bbox[1] / 1_0_0_0),
                            (width * bbox[2] / 1_0_0_0),
                            (height * bbox[3] / 1_0_0_0),
                        ] ) )
            scores, classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
return annotation
    def _get_bounding_box( self : Optional[int] , box : "torch.Tensor" )-> Dict[str, int]:
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
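# Illustrative usage sketch (not part of the original file): this pipeline is normally
# constructed through transformers.pipeline; the checkpoint name below is an assumption.
# from transformers import pipeline
# detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
# detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.9 )
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]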
| 7 |
def interpolation_search( sorted_collection , item ) -> Tuple:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion( sorted_collection , item , left , right ) -> str:
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted( collection ) -> Tuple:
    '''simple docstring'''
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
if __name__ == "__main__":
import sys
    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"""{target} found at positions: {result}""")
        else:
            print("Not found")
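# Worked example (illustrative, not part of the original file): searching for 55 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes index
# 0 + (55 - 10) * (7 - 0) // (93 - 10) = 3; sorted_collection[3] = 45 < 55, so the
# search continues with left = 4, narrowing the window by interpolation rather than halving.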
| 7 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config( model_name : List[str] ) -> Tuple:
    '''simple docstring'''
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img( ) -> Dict:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name : int ) -> Tuple:
    '''simple docstring'''
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names : Union[str, Any] ) -> Dict:
    '''simple docstring'''
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'block{b}_expand_conv/kernel:0', f'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((f'block{b}_expand_bn/gamma:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((f'block{b}_expand_bn/beta:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(f'block{b}_expand_bn/moving_mean:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(f'block{b}_expand_bn/moving_variance:0', f'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(f'block{b}_dwconv/depthwise_kernel:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((f'block{b}_bn/gamma:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((f'block{b}_bn/beta:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(f'block{b}_bn/moving_mean:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(f'block{b}_bn/moving_variance:0', f'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((f'block{b}_se_reduce/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((f'block{b}_se_reduce/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((f'block{b}_se_expand/kernel:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((f'block{b}_se_expand/bias:0', f'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(f'block{b}_project_conv/kernel:0', f'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((f'block{b}_project_bn/gamma:0', f'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((f'block{b}_project_bn/beta:0', f'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(f'block{b}_project_bn/moving_mean:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(f'block{b}_project_bn/moving_variance:0', f'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params( hf_params : Union[str, Any] , tf_params : Any , key_mapping : Union[str, Any] ) -> Optional[Any]:
    '''simple docstring'''
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name : Optional[Any] , pytorch_dump_folder_path : Any , save_model : int , push_to_hub : List[Any] ) -> Optional[Any]:
    '''simple docstring'''
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="softmax" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(f'Pushing converted {model_name} to the hub...' )
        model_name = f'efficientnet-{model_name}'
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 156 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , snake_case_ : Dict , snake_case_ : int=1_3 , snake_case_ : int=7 , snake_case_ : Union[str, Any]=True , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : Optional[Any]=True , snake_case_ : int=9_9 , snake_case_ : Tuple=3_2 , snake_case_ : Dict=5 , snake_case_ : str=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Any=0.1 , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=5_1_2 , snake_case_ : List[Any]=1_6 , snake_case_ : List[Any]=2 , snake_case_ : Any=0.0_2 , snake_case_ : List[str]=False , snake_case_ : Dict=True , snake_case_ : Union[str, Any]="None" , snake_case_ : Dict=3 , snake_case_ : Union[str, Any]=4 , snake_case_ : Dict=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Optional[Any] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase ( self : Optional[int] , snake_case_ : Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase ( self : str , snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ):
_UpperCAmelCase = DebertaVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )[0]
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )[0]
_UpperCAmelCase = model(snake_case_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : int , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
_UpperCAmelCase = DebertaVaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case_ )
def lowercase ( self : Any , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Any , snake_case_ : List[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DebertaVaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Any ):
_UpperCAmelCase = DebertaVaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : Optional[Any] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : str ):
_UpperCAmelCase = DebertaVaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[str] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase : Tuple = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase : Any = True
_lowerCamelCase : int = False
_lowerCamelCase : int = False
_lowerCamelCase : Dict = False
_lowerCamelCase : int = False
def lowercase ( self : List[str] ):
_UpperCAmelCase = DebertaVaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def lowercase ( self : str ):
self.config_tester.run_common_tests()
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case_ )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case_ )
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*snake_case_ )
@slow
def lowercase ( self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = DebertaVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def lowercase ( self : Union[str, Any] ):
pass
@slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
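# For reference, `ids_tensor` (imported from test_modeling_common above) fills a
# tensor of the given shape with random token ids in [0, vocab_size). A minimal
# sketch under that assumption -- the real helper additionally supports a custom
# rng and a name argument, and places the result on the test device:
def _ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)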
| 156 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Run `Dataset.map` and discard the result; only the measured duration matters."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Run `Dataset.filter` and discard the result; only the measured duration matters."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
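# `get_duration` comes from the local benchmark `utils` module, which is not
# shown here. A minimal sketch of such a decorator, assuming it simply returns
# the wall-clock seconds taken by the wrapped call (the real helper may differ):
def _get_duration_sketch(func):
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # duration in seconds, stored in `times`

    return wrapper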
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 169 |
def solution(n: int = 100) -> int:
    """Count the distinct terms generated by a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set

    return len(collect_powers)
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 169 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Any =NllbTokenizer
lowerCamelCase : Dict =NllbTokenizerFast
lowerCamelCase : Optional[int] =True
lowerCamelCase : Union[str, Any] =True
lowerCamelCase : Optional[Any] ={}
def __a ( self ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
a : int = NllbTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
a : Any = NllbTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
a : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
a : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __a ( self ) -> List[Any]:
a : List[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a : int = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
a : Any = tempfile.mkdtemp()
a : Tuple = tokenizer_r.save_pretrained(lowerCAmelCase__ )
a : Optional[Any] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
a : Union[str, Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
a : Tuple = tokenizer_r.from_pretrained(lowerCAmelCase__ )
a : Tuple = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
a : List[Any] = tempfile.mkdtemp()
a : Union[str, Any] = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
a : List[str] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
a : Optional[int] = tokenizer_r.from_pretrained(lowerCAmelCase__ )
a : Any = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
a : Union[str, Any] = tempfile.mkdtemp()
a : Dict = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
a : Optional[int] = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
a : Dict = tokenizer_r.from_pretrained(lowerCAmelCase__ )
a : List[Any] = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
def __a ( self ) -> Optional[int]:
if not self.test_seqaseq:
return
a : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
a : Optional[int] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
a : str = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
a : int = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCAmelCase__ , tgt_texts=lowerCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
a : Tuple = tokenizer.prepare_seqaseq_batch(
lowerCAmelCase__ , tgt_texts=lowerCAmelCase__ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
a : List[str] = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCAmelCase__ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , lowerCAmelCase__ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Tuple = [AddedToken("<special>" , lstrip=lowerCAmelCase__ )]
a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ )
a : Union[str, Any] = tokenizer_r.encode("Hey this is a <special> token" )
a : int = tokenizer_r.encode("<special>" , add_special_tokens=lowerCAmelCase__ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
a : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : int = self.tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ )
a : List[str] = tokenizer_p.encode("Hey this is a <special> token" )
a : Dict = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
lowerCamelCase : int ="""facebook/nllb-200-distilled-600M"""
lowerCamelCase : Tuple =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCamelCase : Dict =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCamelCase : Dict =[
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def __a ( cls ) -> List[str]:
a : NllbTokenizer = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
a : int = 1
return cls
def __a ( self ) -> Any:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_6057 )
def __a ( self ) -> Any:
a : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def __a ( self ) -> List[str]:
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
# fmt: off
a : Tuple = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
a : Optional[int] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
a : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : str = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
a : List[Any] = 10
a : Tuple = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_6203, 3] )
def __a ( self ) -> Tuple:
a : Optional[int] = tempfile.mkdtemp()
a : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
a : Dict = NllbTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def __a ( self ) -> str:
a : Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
a : Dict = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
a : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def __a ( self ) -> List[Any]:
a : str = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt" )
a : Dict = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt" )
a : Union[str, Any] = targets["input_ids"]
a : str = shift_tokens_right(
lowerCAmelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __a ( self ) -> Tuple:
a : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_6057,
} , )
@require_torch
def __a ( self ) -> Union[str, Any]:
a : Optional[Any] = True
a : Dict = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
a : Optional[int] = False
a : List[Any] = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
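# The two assertions above pin down what `legacy_behaviour` changes: in legacy
# mode the source language code is appended after </s> (id 2), matching the
# original fairseq release, while the default prepends it. A tiny illustration
# of the two layouts (`body_ids` stands in for the tokenized sentence):
def _nllb_special_token_layouts(body_ids, lang_code_id, eos_id=2):
    legacy = body_ids + [eos_id, lang_code_id]  # ... </s> eng_Latn
    default = [lang_code_id] + body_ids + [eos_id]  # eng_Latn ... </s>
    return legacy, default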
| 79 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
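# The scaffold above is the standard transformers lazy-import pattern:
# `_import_structure` maps each submodule to its public names, and the module
# object is swapped for a `_LazyModule` that imports a submodule only when one
# of its attributes is first accessed. A stripped-down sketch of the idea (the
# real `_LazyModule` also handles `__dir__`, pickling and module specs):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._lookup = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._lookup:
            raise AttributeError(attr)
        submodule = importlib.import_module(f".{self._lookup[attr]}", self.__name__)
        return getattr(submodule, attr)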
| 79 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[Any] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Union[str, Any] = TextStreamer(__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : int = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : List[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : List[str] = tokenizer.decode(greedy_ids[0] )
_lowerCamelCase : Tuple = TextIteratorStreamer(__lowerCAmelCase )
_lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
_lowerCamelCase : int = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Tuple = -1
_lowerCamelCase : Optional[Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : int = model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
_lowerCamelCase : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_lowerCamelCase : Any = TextStreamer(__lowerCAmelCase , skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1_0 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_lowerCamelCase : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
_lowerCamelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : str = -1
_lowerCamelCase : Any = torch.ones((1, 5) , device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_lowerCamelCase : List[Any] = TextStreamer(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase , max_new_tokens=1 , do_sample=__lowerCAmelCase , streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_lowerCamelCase : Any = cs.out[:-1] # Remove the final "\n"
_lowerCamelCase : int = tokenizer(__lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
_lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
_lowerCamelCase : List[str] = TextIteratorStreamer(__lowerCAmelCase , timeout=0.0_01 )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
_lowerCamelCase : List[Any] = Thread(target=model.generate , kwargs=__lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = ''''''
for new_text in streamer:
streamer_text += new_text
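# The tests above mirror the intended user-facing pattern for
# `TextIteratorStreamer`: run `generate` on a background thread and consume the
# decoded text on the main thread as it is produced. A condensed usage sketch
# (assumes `model` and `tokenizer` are already loaded, as in the tests):
def _streaming_generation_sketch(model, tokenizer, prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for new_text in streamer:  # yields decoded text pieces until generation ends
        print(new_text, end="")
    thread.join()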
| 72 | 1 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"

    return soup.find("div", class_=class_).find("span").text
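# The CSS class string above is tied to Yahoo's current page markup and breaks
# whenever the site changes. A slightly more defensive variant of the same
# scraping approach, returning None instead of raising on a missing node:
def stock_price_or_none(symbol: str = "AAPL"):
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else None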
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 365 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] via divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
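    # The recursion halves the range each call, so the cost follows
    # T(n) = 2 * T(n / 2) + O(1) = O(n) with O(log n) stack depth -- no
    # asymptotic win over a linear scan, but a clean divide-and-conquer example.
    assert find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7) == 9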
| 137 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass floats the largest remaining element upward."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
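    # Each pass bubbles the current maximum to index `length - 1`, so the
    # recursive call can shrink the range by one; worst case is O(n**2)
    # comparisons, while the `swapped` flag gives an O(n) exit on sorted input.
    assert bubble_sort([5, 2, 9, 1]) == [1, 2, 5, 9]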
| 201 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: "OnnxRuntimeModel",
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
UpperCamelCase__ : Optional[int] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase__ : Any = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__magic_name__ )
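# A quick numeric check of the scaling pair above: with min_value = log(1e-5)
# and max_value = 4.0, `scale_features` is an affine map onto [-1, 1] and
# `scale_to_features` is its exact inverse. Standalone demo, independent of the
# pipeline class:
def _scaling_roundtrip_demo():
    min_value, max_value = math.log(1e-5), 4.0
    features = torch.tensor([min_value, 0.0, max_value])
    zero_one = (features - min_value) / (max_value - min_value)
    scaled = zero_one * 2.0 - 1.0  # scale_features(..., output_range=(-1.0, 1.0))
    restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value
    assert torch.allclose(restored, features)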
| 201 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["modeling_data2vec_text"] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["modeling_data2vec_vision"] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
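    # Worked example from the problem statement (Project Euler 65): the 10th
    # convergent of the continued fraction for e is 1457/536, and
    # 1 + 4 + 5 + 7 = 17.
    assert solution(10) == 17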
| 338 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = parse_args()
UpperCAmelCase_ = 5
UpperCAmelCase_ = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ = torch.device(args.device )
UpperCAmelCase_ , UpperCAmelCase_ = load_model_tokenizer(args.model_name_or_path , __A )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(__A )
if args.max_length:
UpperCAmelCase_ = args.max_length
if args.num_beams:
UpperCAmelCase_ = args.num_beams
if args.output_file_path:
UpperCAmelCase_ = args.output_file_path
else:
UpperCAmelCase_ = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(__A , __A , __A , __A , __A )
if __name__ == "__main__":
main()
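# Hypothetical usage sketch (not part of the original script): once the export
# succeeded, the graph can be driven directly from onnxruntime. The file name,
# beam settings, and BART's decoder_start_token_id of 2 are assumptions here.
def _demo_run_exported_onnx(onnx_path="BART.onnx"):
    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    enc = tok(["My friends are cool but they eat too many carbs."], return_tensors="np")
    sess = onnxruntime.InferenceSession(onnx_path)
    (output_ids,) = sess.run(
        None,
        {
            "input_ids": enc["input_ids"],
            "attention_mask": enc["attention_mask"],
            "num_beams": np.array(4),
            "max_length": np.array(5),
            "decoder_start_token_id": np.array(2),  # assumed BART default
        },
    )
    return tok.batch_decode(output_ids, skip_special_tokens=True)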
| 51 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head'):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone'):
            key = key.replace('backbone', 'segformer.encoder')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if key.startswith('head'):
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f'Model {model_name} not supported')
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        raise ValueError(f'Model {model_name} not supported')
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f'Size {size} not supported')
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f'Converting model {model_name}...')
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
])
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
])
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
])
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
])
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
])
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
])
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
])
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
])
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
])
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
])
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
])
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
])
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
])
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
])
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
])
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
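# Hypothetical usage sketch (not part of the original script): once converted,
# the dump folder loads like any other SegFormer checkpoint. The folder path
# below is an assumption for illustration.
def _demo_load_converted_checkpoint(folder="./segformer-b0-ade"):
    processor = SegformerImageProcessor.from_pretrained(folder)
    model = SegformerForSemanticSegmentation.from_pretrained(folder)
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height/4, width/4)
    return logits.argmax(dim=1)  # per-pixel class map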
| 178 | 0 |
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray = None) -> np.ndarray:
    '''Compute the Schur complement of the block ``mat_a`` in ``[[A, B], [B.T, C]]``.'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                'Input matrix A is not invertible. Cannot compute Schur complement.')
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2]])  # two rows vs. three in b: must raise
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
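# Illustrative helper (not in the original module): a standalone check of the
# determinant identity det([[A, B], [B.T, C]]) == det(A) * det(S), where S is
# the Schur complement computed above.
def _demo_schur_determinant_identity() -> None:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([[1.0], [2.0]])
    c = np.array([[5.0]])
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    # both values are 40 up to floating-point error
    print(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))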
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 53 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = None
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ) -> None:
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                ' pretokenized inputs.')
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                ' pretokenized inputs.')
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
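# Hypothetical usage sketch (not part of the original module); the checkpoint
# id is an assumption and fetching it requires network access.
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    ids = tok("Hello world")["input_ids"]
    print(ids, tok.decode(ids))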
| 53 | 1 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    '''Stores two signals and computes their circular convolution via a circulant matrix.'''
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated by one more step
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
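# Quick demo (illustrative, not in the original file): circularly convolving
# the two default signals; the expected values were worked out by hand.
def _demo_circular_convolution() -> None:
    conv = CircularConvolution()
    print(conv.circular_convolution())  # -> [10, 10, 6, 14]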
| 229 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
__a ={'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 218 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
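# Brief usage sketch (illustrative, not part of the original module):
# instantiate the config and inspect the dynamic ONNX input axes.
if __name__ == "__main__":
    config = XLMRobertaConfig(vocab_size=250002)
    onnx_config = XLMRobertaOnnxConfig(config, task="default")
    print(config.model_type, dict(onnx_config.inputs))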
| 355 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour 2x upsampling followed by a 3x3 convolution
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method='nearest', )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )
    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        # project the time embedding and broadcast-add it over height and width
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
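# Minimal init/apply sketch (illustrative, not in the original file), assuming
# NHWC inputs as the blocks above expect; shapes are placeholders.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.ones((1, 16, 16, 32))   # (batch, height, width, channels)
    t = jnp.ones((1, 128))          # time embedding
    params = block.init(rng, x, t)
    y = block.apply(params, x, t)
    print(y.shape)                  # (1, 16, 16, 64)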
| 94 | 0 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Compute (base ** exponent) % modulo_value by fast squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation base ^^ height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
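# Sanity check (illustrative, not part of the original solution): _modexpt
# agrees with Python's built-in three-argument pow on small inputs.
assert _modexpt(3, 20, 1000) == pow(3, 20, 1000) == 401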
| 186 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high+1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
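# Property check (illustrative, not part of the original script): the result
# matches Python's built-in sorted() on a sample list.
assert iter_merge_sort([5, 9, 1, 4, 8, 2]) == sorted([5, 9, 1, 4, 8, 2])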
| 123 | 0 |
import os
import time
import numpy as np
import onnxruntime as ort
snake_case_ = '''1'''
snake_case_ = '''0'''
snake_case_ = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2_000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
| 216 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''Trial-division primality check using the 6k +/- 1 optimization.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    '''Return all left- and right-truncations of n, including n itself.'''
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    '''Return the first `count` primes that remain prime under both truncations.'''
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
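# Spot check (illustrative, not part of the original solution): the smallest
# two-sided truncatable prime is 23.
assert compute_truncated_primes(1) == [23]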
| 216 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {'num_train_timesteps': 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **kwargs):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**kwargs)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
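# Minimal usage sketch (illustrative, not part of the test file): a bare
# denoising loop driving IPNDMScheduler directly; the zero residual stands in
# for a real model prediction and the tensor shape is a placeholder.
def _demo_ipndm_loop() -> torch.Tensor:
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample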
| 168 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = B"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # offsets below are taken from the source; the attribute targets follow the
    # layout of the generated sentencepiece_model_pb2 module
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 168 | 1 |
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """Convert a non-negative integer to its binary representation, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Validate the input and return the signed, 0b-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
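# Spot checks (illustrative, not part of the original file):
assert main("7") == "0b111"
assert main("-10") == "-0b1010"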
| 341 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    """Configuration class for CodeGen models."""
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50400, n_ctx=2048, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self):
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self):
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self):
        '''simple docstring'''
        return 13
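A quick sanity pass over the two classes; the checkpoint name is the public CodeGen checkpoint and is assumed here, not something defined in this file:

# Sketch: inspect the ONNX axes and build dummy inputs (sizes are illustrative).
from transformers import AutoTokenizer, TensorType

config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
onnx_config = CodeGenOnnxConfig(config, use_past=True)
print(onnx_config.inputs)  # dynamic axes for input_ids / attention_mask / past keys

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)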
| 79 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    scheduler: VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings

    def __init__(self, vqvae: VQModel, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, transformer: Transformer2DModel, scheduler: VQDiffusionScheduler, learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings)
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        '''simple docstring'''
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt")
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], num_inference_steps: int = 100, guidance_scale: float = 5.0, truncation_rate: float = 1.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1):
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.''')
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''')
            latents = latents.to(self.device)
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample
            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)
            model_output = self.truncate(model_output, truncation_rate)
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float):
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1, indices.argsort(1))
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
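For reference, the assembled pipeline can be exercised end to end; the checkpoint name below is the publicly released VQ-Diffusion model and is an assumption of this sketch, not something defined in this file:

# Sketch: text-to-image with the pipeline above.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("teddy bear playing in the pool", num_inference_steps=100, truncation_rate=0.86).images[0]
image.save("teddy_bear.png")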
| 79 | 1 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = """pytorch_model.bin"""
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'})


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
    infer_file: str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'})
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={'help': 'The name of the task to train on.'})
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={'help': 'The list of labels for the task.'})


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
    eval_metric: Optional[str] = dataclasses.field(
        default='accuracy', metadata={'help': 'The evaluation metric used for the task.'})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default='no', metadata={
            'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        })
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'})
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
        })
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'})
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'})
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'})
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'})
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'})
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={'help': 'Random seed for initialization.'})
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('probability', reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(['label', 'probability'])
    dataset = dataset.rename_column('prediction', 'label')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, F"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['train'] = args.train_file
    data_files['infer'] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['eval'] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('.')[-1]
        assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
    assert (
        args.eval_metric in datasets.list_metrics()
    ), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info('Creating the initial data directory for self-training...')
    data_dir_format = F"{args.output_dir}/self-train_iter-{{}}".format
    current_data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(current_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, 'stage-1')
        arguments_dict = {
            """accelerator""": accelerator,
            """model_name_or_path""": args.model_name_or_path,
            """cache_dir""": args.cache_dir,
            """do_train""": True,
            """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
            """do_eval""": True if args.eval_file is not None else False,
            """eval_file""": data_files["""eval"""],
            """do_predict""": True,
            """infer_file""": data_files["""infer"""],
            """task_name""": args.task_name,
            """label_list""": args.label_list,
            """output_dir""": current_output_dir,
            """eval_metric""": args.eval_metric,
            """evaluation_strategy""": args.evaluation_strategy,
            """early_stopping_patience""": args.early_stopping_patience,
            """early_stopping_threshold""": args.early_stopping_threshold,
            """seed""": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.', model_bin_file_path, iteration)
        else:
            logger.info('***** Running self-training: iteration: %d, stage: 1 *****', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('Self-training job completed: iteration: %d, stage: 1.', iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, 'best-checkpoint')
            current_output_dir = os.path.join(current_data_dir, 'stage-2')
            # Update arguments_dict
            arguments_dict["""model_name_or_path"""] = model_path
            arguments_dict["""train_file"""] = data_files["""train"""]
            arguments_dict["""output_dir"""] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, 'best-checkpoint', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.', model_bin_file_path, iteration)
            else:
                logger.info('***** Running self-training: iteration: %d, stage: 2 *****', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('Self-training job completed: iteration: %d, stage: 2.', iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, 'best-checkpoint'))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, 'eval_results_best-checkpoint.json')
        test_results_file = os.path.join(current_output_dir, 'test_results_best-checkpoint.json')
        assert os.path.exists(eval_results_file)
        with open(eval_results_file, 'r') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, 'infer_output_best-checkpoint.csv')
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'data': data_files['infer']})["""data"""]
        infer_output = load_dataset('csv', data_files={'data': infer_output_file})["""data"""]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, F"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, F"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()
        data_files["""train_pseudo"""] = os.path.join(next_data_dir, F"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('Best iteration: %d', best_iteration)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F"eval_results_iter-{iteration}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'))
    else:
        # Assume that the last iteration is the best
        logger.info('Best iteration: %d', args.max_selftrain_iterations - 1)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, 'eval_results_best-iteration.json'))
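The entry point above is normally driven from a short launcher; a hedged sketch in which the model name, file paths, and module name are all illustrative:

# Sketch: kicking off the self-training loop (everything below is an assumption).
from selftraining import selftrain  # module name assumed

selftrain(
    model_name_or_path="bert-base-uncased",
    train_file="data/train.csv",
    infer_file="data/infer.csv",
    output_dir="self-train-output",
    max_selftrain_iterations=10,
)
| 365 |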
"""simple docstring"""
def gnome_sort(lst):
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
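A quick agreement check against the built-in sort, shown before the interactive entry point below:

assert gnome_sort([5, 3, 8, 1, 2]) == sorted([5, 3, 8, 1, 2])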
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(gnome_sort(unsorted))
| 182 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func
def question_function(variable: int) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''simple docstring'''
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"{solution() = }")
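As a spot check on `interpolate`, the problem statement's cubic u(n) = n³ gives OP(1, n) = 1 and OP(2, n) = 7n − 6, so the first incorrect terms are 1 and 15:

cube = [n**3 for n in range(1, 4)]     # [1, 8, 27]
assert interpolate(cube[:1])(2) == 1   # FIT of the constant fit
assert interpolate(cube[:2])(3) == 15  # FIT of the linear fit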
| 328 |
import math
def perfect_square(num: int) -> bool:
    '''simple docstring'''
    # math.isqrt avoids the float-precision failures of math.sqrt for large inputs
    return math.isqrt(num) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
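Both routines should agree; a quick spot check over small and large values:

for value in (0, 1, 2, 16, 17, 625, 10**12):
    assert perfect_square(value) == perfect_square_binary_search(value)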
| 328 | 1 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    '''simple docstring'''
    result = ''
    try:
        with open(file_path, """rb""") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print("""File not accessible""")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    '''simple docstring'''
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, """wb""") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("""10000000""")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="""big"""))
    except OSError:
        print("""File not accessible""")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
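From the shell the script takes the compressed source and the output path; despite the top-level function's name, this is the decompression direction. The script and file names below are illustrative:

    python lempel_ziv_decompress.py compressed.lzw restored.txt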
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""vision"""])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="""pt""")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
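Instantiated directly, the tool downloads the CLIPSeg checkpoint on first use; the image path below is illustrative and the positional call mirrors the `inputs` order declared above:

# Sketch: segmenting an image by label with the tool.
from PIL import Image

tool = ImageSegmentationTool()
mask = tool(Image.open("cats.png"), "cat")  # binary PIL mask for the label
mask.save("cat_mask.png")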
| 1 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="""vision-to-text modeling""")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    '''simple docstring'''

    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 91 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:  # root default assumed so the single-argument call below works
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('''/''')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, '''rb''').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, '''wb''') as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''')), ncols=80, unit='''iB''', unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, '''rb''').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''')
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions['''n_mels'''], d_model=dimensions['''n_audio_state'''], max_target_positions=dimensions['''n_text_ctx'''], encoder_layers=dimensions['''n_audio_layer'''], encoder_attention_heads=dimensions['''n_audio_head'''], decoder_layers=dimensions['''n_text_layer'''], decoder_attention_heads=dimensions['''n_text_state'''], max_source_positions=dimensions['''n_audio_ctx'''])
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
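Run end to end, the script accepts either a local ``.pt`` file or one of the size keys from `_MODELS`; the script and output names below are illustrative:

    python convert_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny-hf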
| 338 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 365 |
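A minimal sketch of the reference transposition the docstring above warns about; the sentences are illustrative, and the last line mirrors what `_compute` does internally:

predictions = ["hello there", "general kenobi"]
references = [["hi there", "hello there"], ["general kenobi", "general canopy"]]  # one sub-list per prediction
# sacrebleu wants one list per reference *set*, so transpose:
references_per_prediction = len(references[0])
transformed = [[refs[i] for refs in references] for i in range(references_per_prediction)]
# transformed == [["hi there", "general kenobi"], ["hello there", "general canopy"]]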
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding the current cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 116 | 0 |
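A quick usage sketch for the island counter above (the grid is invented for illustration; connectivity is 8-directional):

if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    g = Graph(len(grid), len(grid[0]), grid)
    print(g.count_islands())  # -> 2: the top-left blob and the lone cell at (2, 3)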
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))
        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
| 53 |
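A numpy-only sketch of why the assertions above expect doc "1" first: FAISS with METRIC_INNER_PRODUCT ranks by dot product, and the values below mirror the dummy dataset:

import numpy as np

query = np.ones(8)                        # the np.ones(retrieval_vector_size) hidden state
doc0, doc1 = np.ones(8), 2 * np.ones(8)   # embeddings of docs "0" and "1"
print(query @ doc0, query @ doc1)         # 8.0 16.0 -> doc "1" has the larger inner product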
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 53 | 1 |
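A worked rolling-hash step matching the update inside `rabin_karp`, checked against a hash computed from scratch (the strings are illustrative):

alphabet_size, modulus = 256, 1_000_003
h_ab = (ord("a") * alphabet_size + ord("b")) % modulus  # hash of "ab"
# slide one character to the right, from "ab" to "bc":
h_bc = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
assert h_bc == (ord("b") * alphabet_size + ord("c")) % modulus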
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_UpperCAmelCase : int = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
_UpperCAmelCase : Tuple = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
_UpperCAmelCase : List[Any] = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 45 |
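A quick sanity check for `acc_and_f1` above; the arrays are illustrative and the expected numbers are worked out in the comment:

import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(acc_and_f1(preds, labels))  # accuracy = 3/4; f1 = 2*(2/3)*1 / ((2/3) + 1) = 0.8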
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
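A hypothetical way to exercise this builder through `load_dataset`; the `data_dir` path and folder layout are assumptions for the sketch:

from datasets import load_dataset

# "./my_clips" is an illustrative path: subfolders named by class, each holding audio files.
ds = load_dataset("audiofolder", data_dir="./my_clips")
print(ds["train"].features)  # expected: an "audio" Audio feature, plus "label" when class folders exist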
| 45 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 44 |
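A short sketch of `align_with_features` in action; the feature names and classes are illustrative:

from datasets import ClassLabel, Features, Image

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification().align_with_features(features)
print(task.label_schema["labels"].names)  # -> ["cat", "dog"]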
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DeformableDetrImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 324 | 0 |
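The shortest-edge resize rule that `get_expected_values` mirrors, pulled out as a standalone sketch (not the processor's actual implementation):

def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    # scale so the shorter side equals `shortest_edge`, preserving aspect ratio
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640))  # -> (18, 24)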
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted)) | 270 |
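Binary search trims the comparisons to O(log i) per insertion, though element shifting keeps the worst case at O(n^2). A quick check:

print(binary_insertion_sort([5, 2, 4, 6, 1, 3]))  # -> [1, 2, 3, 4, 5, 6]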
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Find the longest common substring of text1 and text2 with dynamic programming.

    >>> longest_common_substring("abcdef", "xabded")
    'ab'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod() | 270 | 1 |
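Another quick check of the DP above; on ties in length the first maximal substring found wins:

print(longest_common_substring("applepie", "pieplate"))  # -> "pie"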
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Get image rotation: warp `img` with the affine transform that maps pt1 onto pt2."""
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show() | 331 |
"""simple docstring"""
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve for whichever of stress, tangential force, or area is passed in as zero."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 264 | 0 |
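Worked examples for the solver above, one per branch (values are illustrative):

print(shear_stress(stress=25, tangential_force=100, area=0))       # -> ("area", 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))     # -> ("stress", 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))    # -> ("tangential_force", 1200000.0)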
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 179 |
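A sketch of what the lazy structure above buys: attribute access, not package import, is what triggers the heavy imports (qualitative, not measured):

# import transformers.models.poolformer as poolformer  # cheap: nothing heavy imported yet
# model_cls = poolformer.PoolFormerModel                # first access imports modeling_poolformer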
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
_a = self._tf_checkpoint
_a = ''''''
else:
_a = self._tf_checkpoint
_a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 179 | 1 |
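A hypothetical invocation of the subcommand registered above (checkpoint and output paths are illustrative):

# transformers-cli convert --model_type bert \
#     --tf_checkpoint ./bert_model/model.ckpt \
#     --config ./bert_model/bert_config.json \
#     --pytorch_dump_output ./pytorch_model.bin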
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 9 |
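A small usage sketch; `BertGenerationEncoder` comes from transformers, and the overridden config values are illustrative:

from transformers import BertGenerationConfig, BertGenerationEncoder

config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8, intermediate_size=2048)
model = BertGenerationEncoder(config)  # randomly initialized encoder with the reduced geometry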
class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 128 | 0 |
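A worked run of `solve_sub_array` (a Kadane-style maximum-subarray scan) on an illustrative input:

print(SubArray("1,-2,3,4,-5").solve_sub_array())  # -> 7, from the subarray [3, 4]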
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def snake_case__ ( self : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[Split, str] ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : Optional[int] = mode.value
__snake_case : Any = os.path.join(_lowerCAmelCase , f'''{mode}.txt''' )
__snake_case : List[Any] = 1
__snake_case : Any = []
with open(_lowerCAmelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(_lowerCAmelCase ):
__snake_case : int = []
__snake_case : str = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=_lowerCAmelCase , labels=_lowerCAmelCase ) )
guid_index += 1
return examples
def snake_case__ ( self : Any , _lowerCAmelCase : TextIO , _lowerCAmelCase : TextIO , _lowerCAmelCase : List ):
__snake_case : List[Any] = 0
for sentence in parse_incr(_lowerCAmelCase ):
__snake_case : str = preds_list[example_id]
__snake_case : List[Any] = """"""
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(_lowerCAmelCase )
example_id += 1
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : str ):
if path:
with open(_lowerCAmelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
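# Hedged usage sketch (added for illustration, not part of the original file).
# Assuming a CoNLL-U formatted `train.txt` under a data directory, the POS task
# defined above might be driven like this; the variable names are hypothetical:
#     task = <POS task class above>()
#     examples = task.read_examples_from_file("path/to/data_dir", "train")
#     upos_tags = task.get_labels(path=None)  # falls back to the hardcoded UPOS list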
| 366 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class XLMConfig( PretrainedConfig ):
    model_type = "xlm"
    attribute_map = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
    def __init__( self , vocab_size=3_01_45 , emb_dim=20_48 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=5_12 , embed_init_std=20_48**-0.5 , layer_norm_eps=1e-12 , init_std=0.02 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs["""n_words"""]
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
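# Hedged usage sketch (illustrative, not in the original file): constructing a
# small config and reading a remapped attribute through `attribute_map`.
#     config = XLMConfig(emb_dim=256, n_layers=2, n_heads=4)
#     print(config.hidden_size)  # 256 -- "hidden_size" resolves to "emb_dim"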
| 20 | 0 |
def search ( list_data : list , key : int , left : int = 0 , right : int = 0 ):
    """Two-ended linear search: compare one element from each end per recursive call."""
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
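# Hedged usage example (added): the recursion checks one element from each end per
# call, so this is an O(n) search over a plain (unsorted) list.
#     print(search([1, 2, 3, 4, 5], 4))  # -> 3
#     print(search([1, 2, 3, 4, 5], 9))  # -> -1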
| 95 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.0_0_9
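# Descriptive note (added): the model fitted below is the linear hypothesis
# h(x) = p0 + p1*x1 + p2*x2 + p3*x3, with parameter_vector = [p0, p1, p2, p3];
# run_gradient_descent() nudges each p_i by LEARNING_RATE times the mean cost
# derivative until two successive parameter vectors agree within tolerance.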
def _error ( example_no , data_set="train" ):
    """Difference between predicted and actual output for one example."""
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value ( data_input_tuple ):
    """Linear hypothesis: parameter_vector[0] + sum_i parameter_vector[i + 1] * x_i."""
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output ( example_no , data_set ):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value ( example_no , data_set ):
    """Predicted output value for the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative ( index , end=m ):
    """Sum of error terms (times the input feature, unless index == -1 for the bias)."""
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative ( index ):
    """Mean cost derivative over the training set."""
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent ( ):
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.0_0_0_0_0_2
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent ( ):
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 95 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        return 8
    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=2_24 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , )
        return image_processor
    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''image_processor''': image_processor,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e_img2img( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
for key in inputs.keys():
if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_img2img_out.npy''' )
        pipe = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image ) | 228 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # fmt: off
        vocab = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
                text_tokenized_s = tokenizer_s.tokenize(text )
                text_tokenized_r = tokenizer_r.tokenize(text )
                self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq )
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(text_tokenized_s , text_tokenized_r )
    def test_offsets_mapping_with_different_add_prefix_space( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F"""{text_of_1_token} {text_of_1_token}"""
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F""" {text}"""
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
    def test_log_warning( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError ) as context:
            self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
        self.assertTrue(
            context.exception.args[0].startswith(
                '''The `backend_tokenizer` provided does not match the expected format.''' ) )
    @require_ftfy
    def test_tokenization_python_rust_equals( self ):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case( self ):
        # CLIP always lower cases letters
        pass | 228 | 1 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ (datasets.Metric ):
"""simple docstring"""
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self , predictions , references , max_order=4 , smooth=False ):
        """simple docstring"""
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
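# Hedged usage sketch (illustrative; the load path depends on how this script is
# registered with `datasets`):
#     import datasets
#     bleu = datasets.load_metric("bleu")
#     result = bleu.compute(
#         predictions=[["the", "cat", "sat"]],
#         references=[[["the", "cat", "sat", "down"]]],
#     )
#     print(result["brevity_penalty"])  # < 1.0: the candidate is shorter than the reference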
| 25 | '''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint ( checkpoint , config ):
    """Remap an LDM VAE state dict onto the diffusers AutoencoderKL key layout."""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
        if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.weight""" )
            new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
                f"""encoder.down.{i}.downsample.conv.bias""" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
        ]
        if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.weight"""
            ]
            new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
                f"""decoder.up.{block_id}.upsample.conv.bias"""
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser ( checkpoint_path : str , output_path : str , ):
    """Convert a Stable Diffusion v1 VAE .pt/.safetensors checkpoint to diffusers format."""
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors" ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework="pt" , device="cpu" ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )["state_dict"]
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to save the converted VAE to.')
SCREAMING_SNAKE_CASE_: str =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
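# Hedged invocation example (the script file name and both paths are hypothetical):
#     python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers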
| 1 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _a ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''wavlm'''
    def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1E-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1_500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1 )
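# Hedged usage sketch (illustrative, not part of the original file): a reduced
# configuration; the property above returns the total feature-extractor stride.
#     config = _a(hidden_size=256, num_hidden_layers=2, num_attention_heads=4)
#     print(config.inputs_to_logits_ratio)  # 5*2*2*2*2*2*2 = 320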
| 246 |
'''simple docstring'''
def get_data( source_data: list[list[float]] ) -> list[list[float]]:
    """Transpose the rows of source_data into per-attribute lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data ):
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists: list[list[float]] ,weights: list[int] ) -> list[list[float]]:
    """Normalise each attribute list to [0, 1]; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists ,weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists: list[list[float]] ) -> list[float]:
    """Sum the per-attribute scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity( source_data: list[list[float]] ,weights: list[int] ) -> list[list[float]]:
    """Score each row of source_data and append the combined score to the row."""
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists ,weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
return source_data
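# Hedged usage example (illustrative): two candidates scored on two attributes;
# weight 0 means "lower is better", weight 1 means "higher is better".
#     rows = [[20.0, 60.0], [10.0, 70.0]]
#     print(procentual_proximity(rows, [0, 1]))  # each row gains a combined score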
| 246 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase_ = logging.get_logger(__name__)
class a_ (BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PIL.Image.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 2_5_5 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PIL.Image.BICUBIC , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return resize(
            image , size=(size["""height"""], size["""width"""]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 309 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/pegasus-xsum""": 512,
}
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list ):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list )}, but is'''
                    f''' {type(additional_special_tokens )}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended ), self.offset - 1 )
            ]
            if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset )]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                } )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + self.offset
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self, d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self, text ):
        return self.sp_model.encode(text, out_type=str )
    def _convert_token_to_id( self, token ):
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def _convert_id_to_token( self, index ):
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
        return token
    def convert_tokens_to_string( self, tokens ):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def num_special_tokens_to_add( self, pair=False ):
        return 1
    def _special_token_mask( self, seq ):
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id )  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0 )
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0 ) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file, """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
| 116 | 0 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency ( inductance: float , capacitance: float ) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
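# Hedged usage example (added): a 10 mH inductor with a 5 uF capacitor.
#     print(resonant_frequency(inductance=0.01, capacitance=5e-6))
#     # -> ('Resonant frequency', 711.76...)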
| 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class a_ ( PipelineTool ):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]
UpperCamelCase__ : Union[str, Any] =["text"]
    def encode( self , audio ):
        return self.pre_processor(audio , return_tensors='''pt''').input_features
    def forward( self , inputs ):
        return self.model.generate(inputs=inputs)
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True)[0]
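# Hedged usage sketch (illustrative): PipelineTool instances are callable, so given
# a waveform array `audio`, transcription might look like:
#     tool = a_()
#     text = tool(audio)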
| 344 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'levit'
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
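# Added usage example (not part of the original module):
if __name__ == "__main__":
    config = LevitConfig()
    print(config.model_type)       # levit
    print(config.down_ops[0][:3])  # ['Subsample', 16, 8]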
| 45 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # jump ahead in blocks of size sqrt(n) until the block may contain x
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # linear scan within the block
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
lowercase_ = int(input("Enter the number to be searched:\n"))
lowercase_ = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(F'''Number {x} is at index {res}''')
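# Added walk-through (not in the original file): for a sorted list of length 16
# the block size is floor(sqrt(16)) = 4, so probes land on indices 3, 7, 11, ...
# until arr[min(step, n) - 1] >= x, then a short linear scan finds the target:
#
#     jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
#     -> 10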
| 45 | 1 |
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
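# Added example (not in the original file): overlapping character bigrams.
if __name__ == "__main__":
    print(create_ngram("I am a sentence", 2)[:3])  # ['I ', ' a', 'am']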
| 203 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 needs a > d, and n > 0 needs a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
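# Added note (not in the original file): with x = a + d, y = a, z = a - d,
# n = x**2 - y**2 - z**2 = a*(4*d - a), so a + n//a == 4*d, which is why the
# inner loop tests divisibility by 4. A quick brute check:
if __name__ == "__main__":
    a, d = 6, 4                 # x, y, z = 10, 6, 2
    print(10**2 - 6**2 - 2**2)  # 60
    print(a * (4 * d - a))      # 60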
| 203 | 1 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector))) - 1
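# Added check (not in the original file): this rational form equals np.tanh.
if __name__ == "__main__":
    v = np.array([-1.0, 0.0, 2.0])
    print(np.allclose(tangent_hyperbolic(v), np.tanh(v)))  # True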
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 126 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 126 | 1 |
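# Added sketch (not part of the test script above): the core pattern it
# exercises. `gather_for_metrics` gathers per-process tensors and drops the
# samples that were duplicated to pad the last batch, so metric counts match
# the dataset exactly:
#
#     preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#     metric.add_batch(predictions=preds, references=refs)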
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        return len(self.__components)

    def __str__(self):
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other):
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> "Vector":
        ...

    @overload
    def __mul__(self, other: "Vector") -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self):
        return Vector(self.__components)

    def component(self, i):
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos, value):
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension, pos) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar, x, y) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n, a, b) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> "Matrix":
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self):
        return self.__height

    def width(self):
        return self.__width

    def component(self, x, y):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x, y, value):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x, y):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self):
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(w, h, a, b) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(w)] for _ in range(h)
    ]
    return Matrix(matrix, w, h)
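# Added usage example (not in the original module):
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)                            # (5,7,9)
    print(v * w)                            # 32 (dot product)
    m = Matrix([[2, 0], [0, 3]], 2, 2)
    print(m.determinant())                  # 6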
| 369 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 115 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
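# Added sketch (not transformers' actual implementation): the idea behind
# `_LazyModule` is to defer importing heavy submodules until one of their
# attributes is first accessed. A simplified version:
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map attribute -> submodule that defines it
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)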
| 84 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak model's weights to our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 334 | 0 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'{solution() = }')
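# Added worked example (not in the original file): the even Fibonacci terms up
# to 100 are 2, 8 and 34, so solution(100) should be 44.
if __name__ == "__main__":
    assert solution(100) == 2 + 8 + 34  # 44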
| 361 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 52 | 0 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's scan: either extend the running subarray or restart at num
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'{max_subarray_sum(nums) = }')
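# Added edge case (not in the original file): with an all-negative input the
# empty subarray is the best choice when it is allowed.
print(f"{max_subarray_sum([-5, -2, -9], allow_empty_subarrays=True) = }")  # 0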
| 53 |
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes whose sum equals 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captures numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'pNumber1' nor 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
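# Added usage examples (not in the original module):
if __name__ == "__main__":
    print(get_prime_numbers(20))      # [2, 3, 5, 7, 11, 13, 17, 19]
    print(prime_factorization(60))    # [2, 2, 3, 5]
    print(goldbach(28))               # [5, 23]
    print(kg_v(24, 36))               # 72 (least common multiple)
    print(simplify_fraction(10, 20))  # (1, 2)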
| 20 | 0 |
def solution(n: int = 1000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
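# Added cross-check (not in the original file): inclusion-exclusion gives the
# same answer in O(1): 3*T(333) + 5*T(199) - 15*T(66), with T(k) = k*(k+1)//2.
if __name__ == "__main__":
    def triangular(k: int) -> int:
        return k * (k + 1) // 2

    print(3 * triangular(333) + 5 * triangular(199) - 15 * triangular(66))  # 233168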
| 257 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
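# Added note (not in the original file): this divide-and-conquer version runs
# in O(n log n); Kadane's linear scan solves the same problem in O(n). A quick
# sanity check on the classic example:
if __name__ == "__main__":
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    low, high, best = max_subarray(nums, 0, len(nums) - 1)
    print(low, high, best)  # 3 6 6, i.e. the subarray [4, -1, 2, 1]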
| 257 | 1 |
"""simple docstring"""
def max_product(numbers: list) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
| 243 |
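# Added examples for max_product above (not in the original file):
if __name__ == "__main__":
    print(max_product([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product([-2, 0, -1]))    # 0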
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowercase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(lowercase , 'depth_multiplier' ) )
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Dict , lowercase : List[str] , lowercase : Dict=13 , lowercase : Optional[int]=3 , lowercase : Any=32 , lowercase : Any=0.25 , lowercase : Union[str, Any]=8 , lowercase : List[Any]=8 , lowercase : List[Any]=6 , lowercase : Dict=32 , lowercase : Dict=True , lowercase : Optional[Any]=True , lowercase : Tuple=True , lowercase : Tuple="relu6" , lowercase : List[Any]=1_280 , lowercase : Optional[Any]=0.1 , lowercase : int=0.02 , lowercase : Optional[Any]=True , lowercase : List[str]=True , lowercase : List[str]=10 , lowercase : Optional[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = depth_divisible_by
_snake_case = min_depth
_snake_case = expand_ratio
_snake_case = tf_padding
_snake_case = output_stride
_snake_case = first_layer_is_expansion
_snake_case = finegrained_output
_snake_case = hidden_act
_snake_case = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
_snake_case = classifier_dropout_prob
_snake_case = use_labels
_snake_case = is_training
_snake_case = num_labels
_snake_case = initializer_range
_snake_case = scope
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.num_labels )
_snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def A ( self : str ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : str , lowercase : List[str] , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = MobileNetVaModel(config=lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A ( self : List[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForImageClassification(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , lowercase : int , lowercase : Dict , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
_snake_case = self.num_labels
_snake_case = MobileNetVaForSemanticSegmentation(lowercase )
model.to(lowercase )
model.eval()
_snake_case = model(lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_snake_case = model(lowercase , labels=lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A ( self : str ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) ) | 282 | 0 |
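A quick sanity sketch of the shape arithmetic the tests above assert. The concrete numbers (image_size=32, output_stride=32, last_hidden_size=1280, depth_multiplier=0.25) are illustrative assumptions, not the tester's hidden defaults, which are set outside this excerpt:

# standalone sketch, plain Python only
image_size, output_stride = 32, 32
feature_side = image_size // output_stride  # spatial side of last_hidden_state above

last_hidden_size, depth_multiplier, finegrained_output = 1280, 0.25, False
# mirrors the __init__ above: the pooled size scales with depth_multiplier
pooled = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)

print(feature_side, pooled)  # -> 1 320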
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''',
'''microsoft/deberta-v2-xlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'''
),
'''microsoft/deberta-v2-xxlarge-mnli''': (
'''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'''
),
}
class DebertaVaConfig( PretrainedConfig ):
    model_type = "deberta-v2"

    def __init__( self , vocab_size=12_81_00 , hidden_size=15_36 , num_hidden_layers=24 , num_attention_heads=24 , intermediate_size=61_44 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=0 , initializer_range=0.02 , layer_norm_eps=1E-7 , relative_attention=False , max_relative_positions=-1 , pad_token_id=0 , position_biased_input=True , pos_att_type=None , pooler_dropout=0 , pooler_hidden_act="gelu" , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type ) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|" )]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaVaOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )

    @property
    def default_onnx_opset( self ) -> int:
        return 12

    def generate_dummy_inputs( self , preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size: int = -1 , seq_length: int = -1 , num_choices: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , tokenizer: "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
return dummy_inputs | 357 |
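The backwards-compatibility branch in __init__ above accepts the old pipe-separated string form of pos_att_type. A minimal sketch of the normalization it performs (the value "c2p|p2c" is illustrative):

pos_att_type = "c2p|p2c"  # old-style string form (illustrative value)
if type(pos_att_type) == str:
    pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
print(pos_att_type)  # -> ['c2p', 'p2c']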
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    def __init__( self , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , requires_safety_checker: bool = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers( self ) -> Dict[str, Any]:
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("_" )}

    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_2( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_3( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )

    @torch.no_grad()
    def text2img_sd1_4( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 220 | 0 |
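A hypothetical usage sketch for the pipeline above. It assumes this file is available as the "stable_diffusion_comparison" community pipeline (the custom_pipeline name is an assumption, not verified here); the call returns one image per checkpoint v1.1 through v1.4:

import torch
from diffusers import DiffusionPipeline

# assumption: this module is registered as a community pipeline under this name
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
    torch_dtype=torch.float16,
).to("cuda")
output = pipe(prompt="an astronaut riding a horse")
images = output.images  # four images, one per Stable Diffusion v1.x checkpoint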
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                val = state_dict.pop(key )
                state_dict["conditional_detr.model" + key[len("conditional_detr" ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model" )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
assert torch.allclose(outputs.logits, original_outputs["""pred_logits"""], atol=1E-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs["""pred_boxes"""], atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs["""pred_masks"""], atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 246 |
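A toy sketch of the slicing performed by read_in_q_k_v above: PyTorch's nn.MultiheadAttention stores query, key and value as one stacked in_proj matrix, and the 256-row slices correspond to conditional DETR's hidden size of 256:

import torch

hidden = 256  # conditional DETR's d_model, hence the 256-row slices above
in_proj_weight = torch.randn(3 * hidden, hidden)

q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]

# the three slices tile the stacked matrix exactly
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)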
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500" ):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            f'''Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""", _lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU" )
        print(f'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU" )
                print(f'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
        function(*args )

def debug_launcher(function, args=(), num_processes=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True )
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork" )
| 246 | 1 |
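A minimal sketch of how the launcher above is typically driven from a notebook; the training function and learning-rate argument are placeholders:

from accelerate import Accelerator, notebook_launcher

def training_loop(learning_rate):
    accelerator = Accelerator()
    accelerator.print(f"process {accelerator.process_index}/{accelerator.num_processes}, lr={learning_rate}")

# spawns one process per device; num_processes=2 assumes two GPUs,
# use num_processes=1 to simply run the function in-process
notebook_launcher(training_loop, args=(1e-4,), num_processes=2)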
def selection_sort(collection ):
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
| 288 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer( tf.keras.layers.Layer ):
    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer: GPTaTokenizer , *args , **kwargs ):
        merges = [' '.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )

    @classmethod
    def from_config( cls , config: Dict ):
        return cls(**config )

    def get_config( self ):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 288 | 1 |
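A hedged usage sketch for the layer above: because tokenization runs through tensorflow_text ops, it can live inside a tf.function graph. Loading "gpt2" here is illustrative:

import tensorflow as tf

tokenizer = TFGPTaTokenizer.from_pretrained('gpt2')  # illustrative checkpoint

@tf.function
def tokenize(texts):
    # runs fully in-graph, no Python-side tokenizer needed at serving time
    return tokenizer(texts)

out = tokenize(tf.constant(['hello world']))
print(out['input_ids'])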
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir ):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests( TestCasePlus ):
    def test_run_glue( self ):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 500 )
    def test_trainer_tpu( self ):
        import xla_spawn

        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , 'argv' , testargs ):
xla_spawn.main() | 96 | 0 |
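The tests above drive xla_spawn.main() in-process by patching sys.argv instead of shelling out. A standalone sketch of the same trick on a toy argparse program:

import sys
from unittest.mock import patch
import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--steps', type=int)
    print(parser.parse_args().steps)

testargs = ['prog', '--steps', '10']
with patch.object(sys, 'argv', testargs):
    main()  # prints 10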
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 2_0_0_0 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
    def test_full_tokenizer( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on
    def test_fast_encode_decode( self ):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB )
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
            [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
    @slow
    def test_tokenizer_integration( self ):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='AI-Sweden/gpt-sw3-126m' , sequences=sequences , )
| 356 |
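The '<0x39>', '<0xC3>' and '<0xA9>' pieces asserted above come from SentencePiece's byte fallback: characters not covered by the vocabulary are emitted as their raw UTF-8 bytes. A standard-library-only sketch of that encoding:

for ch in '9é':
    pieces = [f'<0x{b:02X}>' for b in ch.encode('utf-8')]
    print(ch, pieces)
# 9 ['<0x39>']            -> matches the '<0x39>' piece in the test
# é ['<0xC3>', '<0xA9>']  -> the two byte-fallback pieces for 'é'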
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests( TestCasePlus ):
    @require_torch
    def test_offline_mode( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_no_internet( self ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname )
        BertModel.from_pretrained(mname )
        BertTokenizer.from_pretrained(mname )
        pipeline(task='fill-mask' , model=mname )
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_sharded_checkpoint( self ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('success' , result.stdout.decode() )
    @require_torch
    def test_offline_mode_pipeline_exception( self ):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        result = subprocess.run(cmd , env=env , check=False , capture_output=True )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
a_ : Optional[int] = '\nfrom transformers import AutoModel\n '
a_ : Dict = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, '-c', '\n'.join([load, run])]
# should succeed
env = self.get_env()
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env['TRANSFORMERS_OFFLINE'] = '1'
result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
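# --- Illustrative sketch (not part of the test file above): every offline test here
# uses the same trick - run a one-liner in a child interpreter whose socket.socket has
# been monkey-patched so any network access raises. Minimal self-contained version;
# the 'patched'/'unpatched' marker strings are hypothetical, not from the test suite.
if __name__ == "__main__":
    import subprocess
    import sys

    demo_load = "import socket"
    demo_mock = (
        "def offline_socket(*args, **kwargs): raise OSError('Offline mode is enabled')\n"
        "socket.socket = offline_socket"
    )
    demo_run = "print('patched' if socket.socket.__name__ == 'offline_socket' else 'unpatched')"

    demo_cmd = [sys.executable, "-c", "\n".join([demo_load, demo_mock, demo_run])]
    demo_result = subprocess.run(demo_cmd, check=False, capture_output=True)
    assert b"patched" in demo_result.stdout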
| 120 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
"""Download the class list for `class_info_file` from the hub dataset and build the metadata dict."""
with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
class_info = json.load(f)
metadata = {}
class_names = []
thing_ids = []
for key, info in class_info.items():
metadata[key] = info["name"]
class_names.append(info["name"])
if info["isthing"]:
thing_ids.append(int(key))
metadata["thing_ids"] = thing_ids
metadata["class_names"] = class_names
return metadata
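# Usage sketch (illustrative; the exact class names depend on the hub file):
#   metadata = prepare_metadata("ade20k_panoptic.json")
#   metadata["class_names"]  # ordered list of class names
#   metadata["thing_ids"]    # ids of "thing" (instance-level) classes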
class OneFormerImageProcessorTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.class_info_file = class_info_file
self.metadata = prepare_metadata(class_info_file, repo_path)
self.num_text = num_text
self.repo_path = repo_path
# for the post_process_functions
self.batch_size = 2
self.num_queries = 10
self.num_classes = 10
self.height = 3
self.width = 4
self.num_labels = num_labels
self.do_reduce_labels = do_reduce_labels
self.ignore_index = ignore_index
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def get_expected_values(self, image_inputs, batched=False):
if not batched:
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
else:
h, w = image.shape[1], image.shape[2]
if w < h:
expected_height = int(self.size["shortest_edge"] * h / w)
expected_width = self.size['shortest_edge']
elif w > h:
expected_height = self.size['shortest_edge']
expected_width = int(self.size["shortest_edge"] * w / h)
else:
expected_height = self.size['shortest_edge']
expected_width = self.size['shortest_edge']
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
feature_extraction_class = image_processing_class
def setUp(self):
self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
def image_processor_dict(self):
return self.image_processing_tester.prepare_image_processor_dict()
def test_image_proc_properties(self):
image_processor = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processor, "image_mean"))
self.assertTrue(hasattr(image_processor, "image_std"))
self.assertTrue(hasattr(image_processor, "do_normalize"))
self.assertTrue(hasattr(image_processor, "do_resize"))
self.assertTrue(hasattr(image_processor, "size"))
self.assertTrue(hasattr(image_processor, "ignore_index"))
self.assertTrue(hasattr(image_processor, "class_info_file"))
self.assertTrue(hasattr(image_processor, "num_text"))
self.assertTrue(hasattr(image_processor, "repo_path"))
self.assertTrue(hasattr(image_processor, "metadata"))
self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
def test_batch_feature(self):
pass
def test_call_pil(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
encoded_images = image_processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def test_call_numpy(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
encoded_images = image_processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def test_call_pytorch(self):
# Initialize image_processor
image_processor = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
encoded_images = image_processor(
image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
image_processor = self.image_processing_class(**self.image_processor_dict)
# prepare image and target
num_labels = self.image_processing_tester.num_labels
annotations = None
instance_id_to_semantic_id = None
image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
if with_segmentation_maps:
high = num_labels
if is_instance_map:
labels_expanded = list(range(num_labels)) * 2
instance_id_to_semantic_id = dict(enumerate(labels_expanded))
annotations = [
np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
]
if segmentation_type == "pil":
annotations = [Image.fromarray(annotation) for annotation in annotations]
inputs = image_processor(
image_inputs, ["semantic"] * len(image_inputs), annotations, return_tensors="pt", instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )
return inputs
def test_init_without_params(self):
pass
def test_call_with_segmentation_maps(self):
def common(is_instance_map=False, segmentation_type=None):
inputs = self.comm_get_image_processor_inputs(
with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
mask_labels = inputs['mask_labels']
class_labels = inputs['class_labels']
pixel_values = inputs['pixel_values']
text_inputs = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
self.assertEqual(mask_label.shape[0], class_label.shape[0])
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
self.assertEqual(len(text_input), self.image_processing_tester.num_text)
common()
common(is_instance_map=True)
common(is_instance_map=False, segmentation_type="pil")
common(is_instance_map=True, segmentation_type="pil")
def test_binary_mask_to_rle(self):
fake_binary_mask = np.zeros((20, 50))
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask)
self.assertEqual(len(rle), 4)
self.assertEqual(rle[0], 21)
self.assertEqual(rle[1], 45)
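# Sketch of a run-length encoder consistent with the assertions above: flatten the
# mask row-major and emit [start_1, len_1, start_2, len_2, ...] with 1-indexed starts.
# This mirrors the expected values but is not necessarily the library's exact code:
#
#   def rle_sketch(mask):
#       pixels = np.concatenate([[0], mask.flatten(), [0]])
#       runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
#       runs[1::2] -= runs[::2]
#       return list(runs)
#
# For the mask built above this yields [21, 45, 251, 10]: the first run of ones starts
# at flattened pixel 20 (1-indexed 21) and spans 30 + 15 = 45 contiguous pixels.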
def test_post_process_semantic_segmentation(self):
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_semantic_segmentation(outputs)
self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
self.assertEqual(
segmentation[0].shape, (
self.image_processing_tester.height,
self.image_processing_tester.width,
), )
target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
self.assertEqual(segmentation[0].shape, target_sizes[0])
def test_post_process_instance_segmentation(self):
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
def test_post_process_panoptic_segmentation(self):
image_processor = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
outputs = self.image_processing_tester.get_fake_oneformer_outputs()
segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
for el in segmentation:
self.assertTrue("segmentation" in el)
self.assertTrue("segments_info" in el)
self.assertEqual(type(el["segments_info"]), list)
self.assertEqual(
el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 231 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig):
model_type = "bloom"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__(self, vocab_size=25_0880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop('n_embed', None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.n_layer = n_layer
self.n_head = n_head
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.pretraining_tp = pretraining_tp
self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.slow_but_exact = slow_but_exact
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
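# Usage sketch (illustrative values, not checkpoint defaults): the attribute_map
# above lets `num_hidden_layers`/`num_attention_heads` alias `n_layer`/`n_head`.
#
#   config = BloomConfig(n_layer=4, n_head=8, hidden_size=64)
#   assert config.num_hidden_layers == 4
#   assert config.num_attention_heads == 8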
class BloomOnnxConfig(OnnxConfigWithPast):
torch_onnx_minimum_version = version.parse("1.12")
def __init__(self, config, task="default", patching_specs=None, use_past=False, ):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
if not getattr(self._config, 'pad_token_id', None):
# TODO: how to do that better?
self._config.pad_token_id = 0
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(common_inputs, direction='inputs', inverted_values_shape=True)
common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def num_layers(self) -> int:
return self._config.n_layer
@property
def num_attention_heads(self) -> int:
return self._config.n_head
@property
def atol_for_validation(self) -> float:
return 1e-3
def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ) -> Mapping[str, Any]:
common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
# We need to order the input in the way they appears in the forward()
ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
batch, seqlen = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
past_key_values_length = seqlen + 2
head_dim = self._config.hidden_size // self.num_attention_heads
past_key_shape = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
past_value_shape = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
ordered_inputs['past_key_values'] = [
(torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
]
ordered_inputs['attention_mask'] = common_inputs['attention_mask']
if self.use_past:
mask_dtype = ordered_inputs['attention_mask'].dtype
ordered_inputs['attention_mask'] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
@property
def default_onnx_opset(self) -> int:
return 13
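# Sketch of the past_key_values layout produced above, for a toy config with
# hidden_size=64 and n_head=8 (so head_dim=8). Keys use (batch*n_head, head_dim, past_len)
# while values use the inverted (batch*n_head, past_len, head_dim) - the layout the
# `inputs` property accounts for with inverted_values_shape=True:
#
#   batch, seqlen, n_head, head_dim = 2, 5, 8, 8
#   past_len = seqlen + 2                                 # 7
#   key_shape = (batch * n_head, head_dim, past_len)      # (16, 8, 7)
#   value_shape = (batch * n_head, past_len, head_dim)    # (16, 7, 8)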
| 119 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
datasets = load_dataset('glue', 'mrpc')
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
pad_to_multiple_of = 16
elif accelerator.mixed_precision != "no":
pad_to_multiple_of = 8
else:
pad_to_multiple_of = None
return tokenizer.pad(
examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
eval_dataloader = DataLoader(
tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == 'fp8'), )
return train_dataloader, eval_dataloader
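# Usage: padding is per-batch ("longest") rather than to a global max length, e.g.
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)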
def training_function(config, args):
# Initialize accelerator
accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lr = config['lr']
num_epochs = int(config['num_epochs'])
seed = int(config['seed'])
batch_size = int(config['batch_size'])
metric = evaluate.load('glue', 'mrpc')
# If the batch size is too big we use gradient accumulation
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
batch_size = MAX_GPU_BATCH_SIZE
set_seed(seed)
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
model = model.to(accelerator.device)
# Instantiate optimizer
optimizer = AdamW(params=model.parameters(), lr=lr)
# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# Now we train the model
for epoch in range(num_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
outputs = model(**batch)
loss = outputs.loss
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(eval_dataloader):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
metric.add_batch(
predictions=predictions, references=references, )
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
parser = argparse.ArgumentParser(description='Simple example of training script.')
parser.add_argument(
'--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
args = parser.parse_args()
config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(config, args)
if __name__ == "__main__":
main()
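# Launch sketch (standard accelerate CLI; the flags are the ones defined above, and
# the file name `nlp_example.py` is an assumption about how this script is saved):
#   accelerate launch nlp_example.py --mixed_precision fp16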
| 371 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
"""Count tile budgets t <= t_limit that admit between 1 and n_limit hollow-square laminae."""
count: defaultdict = defaultdict(int)
for outer_width in range(3, (t_limit // 4) + 2):
if outer_width * outer_width > t_limit:
hole_width_lower_bound = max(
ceil(sqrt(outer_width * outer_width - t_limit)), 1)
else:
hole_width_lower_bound = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= n_limit)
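# Worked example: an outer square of width 6 with a centred hole of width 4 uses
# 6*6 - 4*4 = 20 tiles, so t = 20 gains one lamina arrangement in `count`;
# the final sum keeps only those t reachable by 1..n_limit distinct arrangements.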
if __name__ == "__main__":
print(F'{solution() = }')
| 145 | 0 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
model = BioGptForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)['''last_hidden_state''']
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)['''last_hidden_state''']
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
model = BioGptModel(config=config).to(torch_device).eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['''last_hidden_state''']
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
'''last_hidden_state'''
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
def create_and_check_forward_and_backwards(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False):
model = BioGptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_biogpt_weight_initialization(self, config, *args):
model = BioGptModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def create_and_check_biogpt_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
def setUp(self):
self.model_tester = BioGptModelTester(self)
self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_biogpt_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_various_embeddings(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
def test_biogpt_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_biogpt_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
def test_biogpt_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
def test_biogpt_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def test_batch_generation(self):
model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""")
model.to(torch_device)
tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
tokenizer.padding_side = '''left'''
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
'''Hello, my dog is a little''',
'''Today, I''',
]
inputs = tokenizer(sentences, return_tensors="""pt""", padding=True)
input_ids = inputs['''input_ids'''].to(torch_device)
outputs = model.generate(
input_ids=input_ids, attention_mask=inputs["""attention_mask"""].to(torch_device), )
inputs_non_padded = tokenizer(sentences[0], return_tensors="""pt""").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1], return_tensors="""pt""").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained(self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BioGptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_biogpt_sequence_classification_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_biogpt_sequence_classification_model_for_multi_label(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = '''multi_label_classification'''
input_ids = input_dict['''input_ids''']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_biogpt(self):
model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""")
input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]])
output = model(input_ids)[0]
vocab_size = 4_23_84
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
@slow
def test_biogpt_generation(self):
tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""")
model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""")
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer("""COVID-19 is""", return_tensors="""pt""").to(torch_device)
output_ids = model.generate(
**tokenized, min_length=1_00, max_length=10_24, num_beams=5, early_stopping=True, )
output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
EXPECTED_OUTPUT_STR = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 25 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, '''embed_dim'''))
self.parent.assertTrue(hasattr(config, '''num_heads'''))
class TFCvtModelTester:
def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.0_2, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_sizes = patch_sizes
self.patch_stride = patch_stride
self.patch_padding = patch_padding
self.is_training = is_training
self.use_labels = use_labels
self.num_labels = num_labels
self.num_channels = num_channels
self.embed_dim = embed_dim
self.num_heads = num_heads
self.stride_kv = stride_kv
self.depth = depth
self.cls_token = cls_token
self.attention_drop_rate = attention_drop_rate
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
# create a random int32 tensor of given shape
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def create_and_check_model(self, config, pixel_values, labels):
model = TFCvtModel(config=config)
result = model(pixel_values, training=False)
image_size = (self.image_size, self.image_size)
height, width = image_size[0], image_size[1]
for i in range(len(self.depth)):
height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = TFCvtForImageClassification(config)
result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
pipeline_model_mapping = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
has_attentions = False
test_onnx = False
def setUp(self):
self.model_tester = TFCvtModelTester(self)
self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def test_attention_outputs(self):
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def test_inputs_embeds(self):
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def test_model_common_attributes(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def test_dataset_conversion(self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def test_keras_fit_mixed_precision(self):
policy = tf.keras.mixed_precision.Policy('''mixed_float16''')
tf.keras.mixed_precision.set_global_policy(policy)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''')
def test_forward_signature(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = len(self.model_tester.depth)
self.assertEqual(len(hidden_states), expected_num_layers)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]), [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
], )
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict['''output_hidden_states'''] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_image_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFCvtModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def prepare_img():
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 181 | 0 |
'''simple docstring'''
def naive_pattern_search(s, pattern):
    """Return every index of `s` at which `pattern` starts (overlaps included)."""
    pat_len = len(pattern)
    position = []
    # Slide the pattern over the text one index at a time.
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
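    # Cross-check against the standard library (an illustrative addition, not
    # part of the original module): repeated `str.find` calls collect the same
    # overlapping match positions.
    def stdlib_pattern_search(text: str, pattern: str) -> list:
        positions, i = [], 0
        while (i := text.find(pattern, i)) != -1:
            positions.append(i)
            i += 1
        return positions

    assert stdlib_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")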
| 170 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50_267, entity_vocab_size=500_000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """Constructs LukeConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
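# A quick sanity check of the defaults (illustrative, not part of the original
# module):
#
#   config = LukeConfig()
#   assert config.hidden_size == 768 and config.entity_emb_size == 256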
| 170 | 1 |
"""simple docstring"""
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs in the first arrangement whose total disc
    count exceeds `min_total`, stepping through the convergents of a Pell-style
    recurrence instead of brute-forcing every arrangement.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
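# A minimal brute-force cross-check for small bounds (an illustrative addition,
# not part of the original solution): find b with 2*b*(b-1) == n*(n-1) directly.
def brute_force(min_total: int) -> int:
    n = min_total + 1
    while True:
        for b in range(1, n):
            if 2 * b * (b - 1) == n * (n - 1):
                return b
        n += 1


if __name__ == "__main__":
    assert brute_force(2) == solution(2) == 3  # 3 blue discs out of 4 in total
    assert brute_force(5) == solution(5) == 15  # 15 blue discs out of 21 in total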
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though the Wiki-Auto and TURK datasets
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
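# A minimal smoke test (mirrors the docstring example above; assumes the
# `sacrebleu` and `sacremoses` dependencies are installed):
if __name__ == "__main__":
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print(compute_sari(sources=sources, predictions=predictions, references=references))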
| 52 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase : Union[str, Any] = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
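# Launch sketch (the script name and flags here are illustrative, not from the
# original test suite):
#
#   accelerate launch --num_processes 2 test_metrics.py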
if __name__ == "__main__":
main()
| 351 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """
    A single training/test example for the HANS dataset.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """
    A single set of features of data.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
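# Hypothetical usage (assumes the HANS tsv files were downloaded to ./hans-data;
# the paths and model name are illustrative):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_dataset = HansDataset("./hans-data", tokenizer, task="hans", max_seq_length=128)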
| 168 | 0 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_63_84,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 286 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
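# Usage sketch for the converter below (the model objects are illustrative):
#
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)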
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
| 286 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
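# With the `_LazyModule` registration above, the heavy framework submodules are
# only imported on first attribute access, e.g. (a sketch):
#
#   from transformers.models.xlm_roberta import XLMRobertaConfig  # no torch/tf import yet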
| 357 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    "Wraps a `key` function (that maps an object to a string) to lower case and remove underscores."

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    "Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
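# For example (illustrative):
#   sort_objects(["do_thing", "ZModel", "A_CONST"]) == ["A_CONST", "ZModel", "do_thing"]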
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
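# For example (illustrative):
#   sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig", "do_thing"]')
#   == '_import_structure["models"] = ["AConfig", "ZModel", "do_thing"]'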
def sort_imports(file, check_only=True):
    """Sort `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 157 | 0 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return tarfile.is_tarfile(lowerCamelCase_ )
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
def resolved(lowerCamelCase_ ) -> str:
return os.path.realpath(os.path.abspath(lowerCamelCase_ ) )
def badpath(lowerCamelCase_ , lowerCamelCase_ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) ).startswith(lowerCamelCase_ )
def badlink(lowerCamelCase_ , lowerCamelCase_ ) -> bool:
# Links are interpreted relative to the directory containing the link
a = resolved(os.path.join(lowerCamelCase_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCamelCase_ )
a = resolved(lowerCamelCase_ )
for finfo in members:
if badpath(finfo.name , lowerCamelCase_ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(lowerCamelCase_ , lowerCamelCase_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(lowerCamelCase_ , lowerCamelCase_ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
a = tarfile.open(lowerCamelCase_ )
tar_file.extractall(lowerCamelCase_ , members=TarExtractor.safemembers(lowerCamelCase_ , lowerCamelCase_ ) )
tar_file.close()
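The safemembers filter above guards tar extraction against path traversal. As a minimal, self-contained sketch of the same check (names here are illustrative, not the sample's API):
import os

def is_within_directory(base: str, target: str) -> bool:
    # Resolve both paths; os.path.join ignores `base` when `target` is absolute,
    # so a member like "/etc/passwd" or "../x" would otherwise escape the output dir.
    base = os.path.realpath(os.path.abspath(base))
    resolved = os.path.realpath(os.path.abspath(os.path.join(base, target)))
    return resolved.startswith(base)

assert is_within_directory("/tmp/out", "data/file.txt")
assert not is_within_directory("/tmp/out", "../secret")
assert not is_within_directory("/tmp/out", "/etc/passwd")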
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\x1F\x8B"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
with gzip.open(lowerCamelCase_ , "rb" ) as gzip_file:
with open(lowerCamelCase_ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [
B"PK\x03\x04",
B"PK\x05\x06", # empty archive
B"PK\x07\x08", # spanned archive
]
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ , lowerCamelCase_ = b"" ):
"""simple docstring"""
if super().is_extractable(lowerCamelCase_ , magic_number=lowerCamelCase_ ):
return True
try:
            # Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCamelCase_ , "rb" ) as fp:
a = _EndRecData(lowerCamelCase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
a = fp.read(lowerCamelCase_ ) # CD is where we expect it to be
if len(lowerCamelCase_ ) == sizeCentralDir:
a = struct.unpack(lowerCamelCase_ , lowerCamelCase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with zipfile.ZipFile(lowerCamelCase_ , "r" ) as zip_file:
zip_file.extractall(lowerCamelCase_ )
zip_file.close()
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
with lzma.open(lowerCamelCase_ ) as compressed_file:
with open(lowerCamelCase_ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"Rar!\x1a\x07\x00", B"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
a = rarfile.RarFile(lowerCamelCase_ )
rf.extractall(lowerCamelCase_ )
rf.close()
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\x28\xb5\x2F\xFD"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
a = zstd.ZstdDecompressor()
with open(lowerCamelCase_ , "rb" ) as ifh, open(lowerCamelCase_ , "wb" ) as ofh:
dctx.copy_stream(lowerCamelCase_ , lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\x42\x5A\x68"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
with bza.open(lowerCamelCase_ , "rb" ) as compressed_file:
with open(lowerCamelCase_ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with pyazr.SevenZipFile(lowerCamelCase_ , "r" ) as archive:
archive.extractall(lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = [B"\x04\x22\x4D\x18"]
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(lowerCamelCase_ , "rb" ) as compressed_file:
with open(lowerCamelCase_ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase_ , lowerCamelCase_ )
class _lowercase :
"""simple docstring"""
    # Put the zip check last, b/c files can be wrongly detected as zip (presumably: tar or gzip files).
__A = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCamelCase_ (cls ):
"""simple docstring"""
return max(
len(lowerCamelCase_ )
for extractor in cls.extractors.values()
if issubclass(lowerCamelCase_ , lowerCamelCase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCamelCase_ (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCamelCase_ , magic_number_length=lowerCamelCase_ )
except OSError:
return b""
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ , lowerCamelCase_ = False ):
"""simple docstring"""
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=lowerCamelCase_ , )
a = cls.infer_extractor_format(lowerCamelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ ): # <Added version="2.4.0"/>
"""simple docstring"""
a = cls._get_magic_number_max_length()
a = cls._read_magic_number(lowerCamelCase_ , lowerCamelCase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCamelCase_ , magic_number=lowerCamelCase_ ):
return extractor_format
@classmethod
def UpperCamelCase_ (cls , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = "deprecated" , ):
"""simple docstring"""
os.makedirs(os.path.dirname(lowerCamelCase_ ) , exist_ok=lowerCamelCase_ )
# Prevent parallel extractions
a = str(Path(lowerCamelCase_ ).with_suffix(".lock" ) )
with FileLock(lowerCamelCase_ ):
shutil.rmtree(lowerCamelCase_ , ignore_errors=lowerCamelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCamelCase_ , lowerCamelCase_ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=lowerCamelCase_ , )
a = extractor if extractor != "deprecated" else extractor_format
else:
a = cls.extractors[extractor_format]
return extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=lowerCamelCase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCamelCase_ ):
return extractor.extract(lowerCamelCase_ , lowerCamelCase_ )
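For orientation, a hedged standard-library-only sketch of the dispatch pattern this class implements: sniff the file's magic number, then route to a matching extractor. The names and the two-format table are assumptions for illustration:
import gzip
import os
import shutil
import zipfile
from typing import Optional

MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip"}  # magic number -> format name

def infer_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC))  # read the longest magic number
    for magic, fmt in MAGIC.items():
        if head.startswith(magic):
            return fmt
    return None

def extract(path: str, out: str) -> None:
    fmt = infer_format(path)
    if fmt == "gzip":
        with gzip.open(path, "rb") as src, open(out, "wb") as dst:
            shutil.copyfileobj(src, dst)
    elif fmt == "zip":
        os.makedirs(out, exist_ok=True)
        with zipfile.ZipFile(path, "r") as zf:
            zf.extractall(out)
    else:
        raise ValueError(f"unrecognized archive: {path}")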
| 227 |
def bead_sort( sequence : list ) -> list:
    """simple docstring"""
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
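Each inner comparison lets the surplus "beads" fall from the upper rod to the lower one, which amounts to an adjacent swap; len(sequence) passes therefore sort the list, bubble-sort style. A quick hedged trace:
# On [3, 1]: rod_upper=3 > rod_lower=1, so 2 beads fall and the pair becomes [1, 3].
print(bead_sort([7, 9, 4, 3, 5]))  # [3, 4, 5, 7, 9]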
| 227 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_lowercase: Union[str, Any] = logging.get_logger(__name__)
_lowercase: int = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = "imagegpt"
__A = ["past_key_values"]
__A = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , lowerCamelCase_=512 + 1 , lowerCamelCase_=32 * 32 , lowerCamelCase_=512 , lowerCamelCase_=24 , lowerCamelCase_=8 , lowerCamelCase_=None , lowerCamelCase_="quick_gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1E-5 , lowerCamelCase_=0.02 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , **lowerCamelCase_ , ):
"""simple docstring"""
a = vocab_size
a = n_positions
a = n_embd
a = n_layer
a = n_head
a = n_inner
a = activation_function
a = resid_pdrop
a = embd_pdrop
a = attn_pdrop
a = layer_norm_epsilon
a = initializer_range
a = scale_attn_weights
a = use_cache
a = scale_attn_by_inverse_layer_idx
a = reorder_and_upcast_attn
a = tie_word_embeddings
super().__init__(tie_word_embeddings=lowerCamelCase_ , **lowerCamelCase_ )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = -1 , lowerCamelCase_ = False , lowerCamelCase_ = None , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 32 , ):
"""simple docstring"""
a = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
a = dict(preprocessor(images=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) )
return inputs
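As a hedged usage note (requires transformers; the values below are read off the defaults in the signature above, where the +1 in the vocab is conventionally a start-of-sequence token):
from transformers import ImageGPTConfig

config = ImageGPTConfig()    # defaults from the signature above
print(config.vocab_size)     # 513 = 512 color clusters + 1 start token
print(config.n_positions)    # 1024 = 32 * 32 pixels
print(config.hidden_size)    # 512, resolved to n_embd via the attribute_map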
| 371 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a( A : List[str] , A : int=0.999 , A : Union[str, Any]="cosine" , ) -> Optional[int]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A : Optional[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
a = []
for i in range(A ):
a = i / num_diffusion_timesteps
a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A ) / alpha_bar_fn(A ) , A ) )
return torch.tensor(A , dtype=torch.floataa )
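The def above appears under an obfuscated name; the call in the `squaredcos_cap_v2` branch below reveals it as betas_for_alpha_bar. It discretizes the continuous noise curve as beta_i = 1 - alpha_bar((i+1)/N) / alpha_bar(i/N), capped at max_beta so the last steps stay numerically stable. A self-contained sketch with the cosine transform (restoring the t parameter the obfuscation dropped):
import math

def betas_for_alpha_bar_sketch(num_steps: int, max_beta: float = 0.999) -> list:
    alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas

print(betas_for_alpha_bar_sketch(5))  # monotonically increasing, last value capped at 0.999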
class _lowercase ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
__A = [e.name for e in KarrasDiffusionSchedulers]
__A = 2
@register_to_config
def __init__(self , lowerCamelCase_ = 1000 , lowerCamelCase_ = 0.0_0085 , lowerCamelCase_ = 0.012 , lowerCamelCase_ = "linear" , lowerCamelCase_ = None , lowerCamelCase_ = "epsilon" , lowerCamelCase_ = "linspace" , lowerCamelCase_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
a = torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
a = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a = betas_for_alpha_bar(lowerCamelCase_ )
else:
            raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
a = 1.0 - self.betas
a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
if schedule_timesteps is None:
a = self.timesteps
a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
a = 1 if len(lowerCamelCase_ ) > 1 else 0
else:
a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
if self.state_in_first_order:
a = self.sigmas[step_index]
else:
a = self.sigmas_interpol[step_index]
a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ):
"""simple docstring"""
a = num_inference_steps
a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
a = torch.from_numpy(np.log(lowerCamelCase_ ) ).to(lowerCamelCase_ )
a = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ )
a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
a = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
# interpolate sigmas
a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase_ ).startswith("mps" ):
# mps does not support float64
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=torch.floataa )
else:
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
# interpolate timesteps
a = self.sigma_to_t(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=timesteps.dtype )
a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
a = torch.cat([timesteps[:1], interleaved_timesteps] )
a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a = defaultdict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = sigma.log()
# get distribution
a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
a = low_idx + 1
a = self.log_sigmas[low_idx]
a = self.log_sigmas[high_idx]
# interpolate sigmas
a = (low - log_sigma) / (low - high)
a = w.clamp(0 , 1 )
# transform interpolation to time range
a = (1 - w) * low_idx + w * high_idx
a = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return self.sample is None
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a = self.sigmas[step_index]
a = self.sigmas_interpol[step_index + 1]
a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
a = self.sigmas[step_index - 1]
a = self.sigmas_interpol[step_index]
a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a = 0
a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a = sigma_interpol - sigma_hat
# store for 2nd order step
a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
a = sigma_next - sigma_hat
a = self.sample
a = None
a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
a = self.timesteps.to(original_samples.device )
a = timesteps.to(original_samples.device )
a = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps]
a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
a = sigma.unsqueeze(-1 )
a = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps
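This matches diffusers' KDPM2-style discrete scheduler; assuming it is exposed as KDPM2DiscreteScheduler, the driving loop looks roughly like this hedged sketch (the random tensor stands in for a UNet's noise prediction, and the interleaved timesteps give the two model evaluations per second-order step):
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # stand-in for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample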
| 71 | 0 |
def odd_even_transposition( arr ) -> list:
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2, arr_size - 1, 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 138 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ) -> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b, 2 ) for a, b in zip(input_a, input_b ) ) )
def similarity_search( dataset , value_array ) -> list[list[list[float] | float]]:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ) -> float:
    '''simple docstring'''
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
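A quick usage check of the two search primitives above, with the names restored:
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.1, 0.1]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 0.14142...]]
print(cosine_similarity(np.array([1.0, 1.0]), np.array([0.5, 0.5])))  # 1.0 (parallel vectors)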
| 138 | 1 |
snake_case_ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 238 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : str = 'sew-d'
def __init__(self : str , a__ : List[Any]=32 , a__ : Optional[int]=768 , a__ : Union[str, Any]=12 , a__ : Tuple=12 , a__ : int=3072 , a__ : List[Any]=2 , a__ : Dict=512 , a__ : Dict=256 , a__ : List[str]=True , a__ : List[str]=True , a__ : str=("p2c", "c2p") , a__ : str="layer_norm" , a__ : str="gelu_python" , a__ : List[Any]=0.1 , a__ : Dict=0.1 , a__ : Optional[Any]=0.1 , a__ : Union[str, Any]=0.0 , a__ : Dict=0.1 , a__ : int=0.0_2 , a__ : str=1E-7 , a__ : Union[str, Any]=1E-5 , a__ : Union[str, Any]="group" , a__ : Optional[Any]="gelu" , a__ : List[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , a__ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a__ : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a__ : List[str]=False , a__ : Optional[Any]=128 , a__ : Union[str, Any]=16 , a__ : Optional[Any]=True , a__ : List[Any]=0.0_5 , a__ : Tuple=10 , a__ : str=2 , a__ : int=0.0 , a__ : Optional[Any]=10 , a__ : List[Any]=0 , a__ : Optional[int]="mean" , a__ : List[str]=False , a__ : Tuple=False , a__ : Optional[int]=256 , a__ : str=0 , a__ : List[Any]=1 , a__ : int=2 , **a__ : List[str] , ):
"""simple docstring"""
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = list(a__ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = squeeze_factor
__snake_case = max_position_embeddings
__snake_case = position_buckets
__snake_case = share_att_key
__snake_case = relative_attention
__snake_case = norm_rel_ebd
__snake_case = list(a__ )
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layer_norm_eps
__snake_case = feature_layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# sequence classification
__snake_case = use_weighted_layer_sum
__snake_case = classifier_proj_size
@property
def a (self : List[Any] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
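The closing property computes the feature extractor's downsampling factor by multiplying the conv strides. With the default strides above, the hedged arithmetic is:
from functools import reduce
from operator import mul

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(reduce(mul, conv_stride, 1))  # 320 input samples per output frame (20 ms, assuming 16 kHz audio)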
| 238 | 1 |
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , '''html.parser''' )
    div = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
    anchors = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
    print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 262 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_0}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[str] = min_resolution
lowerCAmelCase_ : Dict = max_resolution
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Union[str, Any] = do_center_crop
lowerCAmelCase_ : Optional[Any] = crop_size
def lowercase_ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
@property
def lowercase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowercase , '''size''' ) )
self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def lowercase_ ( self ) -> Tuple:
pass
def lowercase_ ( self ) -> Union[str, Any]:
# Initialize image_processing
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , Image.Image )
# Test not batched input
lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Optional[int]:
# Initialize image_processing
lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase_ ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase , torch.Tensor )
# Test not batched input
lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowerCAmelCase_ : Dict = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
        ) , )
| 262 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve( ) -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map : dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit : float = 1E10 ) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
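The generator above is an incremental sieve of Eratosthenes: factor_map sends each upcoming composite to one of its prime factors, so any integer never found in the map is prime. A quick check now that the names are restored:
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]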
| 368 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
# NOTE: the original function name was obfuscated; "electrical_impedance" is a
# plausible reconstruction given the formulas below.
def electrical_impedance( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
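A quick hedged usage check (the function name above is itself a reconstruction, so treat this as a sketch):
# Series circuit: |Z|^2 = R^2 + X^2, so a 3-ohm resistance with 4-ohm reactance
# gives |Z| = 5; fixing any two quantities recovers the third.
print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}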
| 95 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _snake_case ( _lowercase ):
def __init__( self: Optional[Any] , *__lowerCamelCase: Union[str, Any] , __lowerCamelCase: str=None , __lowerCamelCase: Dict=None , **__lowerCamelCase: Tuple ) -> Optional[Any]:
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
__UpperCAmelCase : Any = eval_examples
__UpperCAmelCase : List[str] = post_process_function
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[Dataset] = None , __lowerCamelCase: int=None , __lowerCamelCase: Optional[List[str]] = None , __lowerCamelCase: str = "eval" , **__lowerCamelCase: Dict , ) -> Dict[str, float]:
__UpperCAmelCase : str = gen_kwargs.copy()
__UpperCAmelCase : Dict = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
__UpperCAmelCase : str = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
__UpperCAmelCase : Union[str, Any] = gen_kwargs
__UpperCAmelCase : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
__UpperCAmelCase : Optional[int] = self.get_eval_dataloader(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : List[Any] = self.compute_metrics
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : List[str] = time.time()
__UpperCAmelCase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : List[str] = eval_loop(
__lowerCamelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , metric_key_prefix=__lowerCamelCase , )
finally:
__UpperCAmelCase : Any = compute_metrics
__UpperCAmelCase : Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCamelCase , __lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__UpperCAmelCase : str = self.post_process_function(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = self.compute_metrics(__lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__UpperCAmelCase : str = metrics.pop(__lowerCamelCase )
metrics.update(output.metrics )
else:
__UpperCAmelCase : Optional[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__lowerCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__UpperCAmelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCamelCase )
return metrics
def _lowerCamelCase ( self: Any , __lowerCamelCase: int , __lowerCamelCase: List[str] , __lowerCamelCase: Dict=None , __lowerCamelCase: str = "test" , **__lowerCamelCase: Optional[Any] ) -> List[str]:
__UpperCAmelCase : Optional[int] = gen_kwargs.copy()
__UpperCAmelCase : Optional[int] = self.get_test_dataloader(__lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__UpperCAmelCase : int = self.compute_metrics
__UpperCAmelCase : Any = None
__UpperCAmelCase : Union[str, Any] = time.time()
__UpperCAmelCase : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__UpperCAmelCase : Optional[int] = eval_loop(
__lowerCamelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , metric_key_prefix=__lowerCamelCase , )
finally:
__UpperCAmelCase : List[str] = compute_metrics
__UpperCAmelCase : List[Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__lowerCamelCase , __lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__UpperCAmelCase : List[Any] = self.post_process_function(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , "predict" )
__UpperCAmelCase : List[str] = self.compute_metrics(__lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__UpperCAmelCase : Optional[Any] = metrics.pop(__lowerCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCamelCase )
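Both overrides above end by normalizing metric keys so every entry carries the split prefix before logging. A standalone sketch of just that step:
metrics = {"exact_match": 81.2, "eval_f1": 88.5}
metric_key_prefix = "eval"
for key in list(metrics):
    if not key.startswith(f"{metric_key_prefix}_"):
        metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_f1': 88.5, 'eval_exact_match': 81.2}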
| 157 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray( arr , low , high ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum( arr , low , mid , high ) -> tuple[int, int, float]:
    left_sum , max_left = float("-inf" ), -1
    right_sum , max_right = float("-inf" ), -1
    summ : int | float = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray( input_size ) -> float:
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
# NOTE: this function's original name was obfuscated; "benchmark" is assumed.
def benchmark( ) -> None:
    input_sizes = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken" )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , "\t\t" , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel("Number of Inputs" )
    plt.ylabel("Time taken in seconds" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
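A brute-force cross-check for the divide-and-conquer routine above, on the classic CLRS example array:
arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
brute = max(
    (sum(arr[lo : hi + 1]), lo, hi)
    for lo in range(len(arr))
    for hi in range(lo, len(arr))
)
print(brute)                               # (43, 7, 10)
print(max_subarray(arr, 0, len(arr) - 1))  # (7, 10, 43): arr[7:11] sums to 43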
| 157 | 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 2 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Optional[int] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: List[str] = use_labels
lowercase__: Union[str, Any] = vocab_size
lowercase__: str = hidden_size
lowercase__: Any = embedding_size
lowercase__: Any = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Tuple = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = num_labels
lowercase__: int = num_choices
lowercase__: int = scope
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = None
lowercase__: Any = None
lowercase__: str = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
lowercase__: Optional[int] = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def _snake_case ( self ):
lowercase__: int = MobileBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
        model = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
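
# Note (illustrative, not part of the test): the bound check above is just a
# relative-tolerance comparison; torch.allclose expresses the same idea.
def _relative_tolerance_demo() -> None:
    expected = torch.tensor([1.0e8, 2.5, -0.17])
    actual = expected * (1 + 5e-4)  # simulate a 0.05% relative error
    assert torch.allclose(actual, expected, rtol=1e-3, atol=0.0)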
| 2 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : Dict = ["pixel_values"]
def __init__( self: Union[str, Any] , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_: bool = True , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: bool = True , UpperCAmelCase_: Union[int, float] = 1 / 255 , UpperCAmelCase_: bool = True , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: bool = True , **UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ , param_name="""crop_size""" )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_center_crop
_SCREAMING_SNAKE_CASE = crop_size
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
_SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: int , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
_SCREAMING_SNAKE_CASE = get_resize_output_image_size(UpperCAmelCase_ , size=size["""shortest_edge"""] , default_to_square=UpperCAmelCase_ )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Dict[str, int] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Tuple , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(UpperCAmelCase_ , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Union[int, float] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: Tuple , ):
'''simple docstring'''
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: np.ndarray , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Union[float, List[float]] , UpperCAmelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: int , UpperCAmelCase_: ImageInput , UpperCAmelCase_: bool = None , UpperCAmelCase_: Dict[str, int] = None , UpperCAmelCase_: PILImageResampling = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: int = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: float = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: Optional[Union[float, List[float]]] = None , UpperCAmelCase_: bool = None , UpperCAmelCase_: Optional[Union[str, TensorType]] = None , UpperCAmelCase_: Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_: List[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , param_name="""size""" , default_to_square=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase_ , param_name="""crop_size""" , default_to_square=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE = [convert_to_rgb(UpperCAmelCase_ ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_ ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ ) for image in images]
_SCREAMING_SNAKE_CASE = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
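
# Usage sketch (illustrative; upstream this class is CLIPImageProcessor, while
# the name in this sample is obfuscated):
def _image_processor_demo() -> None:
    from PIL import Image
    from transformers import CLIPImageProcessor  # upstream home of the class above

    image = Image.new("RGB", (256, 256))
    processor = CLIPImageProcessor(size={"shortest_edge": 224})
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)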
| 306 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
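
# Usage sketch for the helper above (names per the reconstructed code): twenty
# cameras panning around the origin, each rendering a size x size view.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    rays = cameras.camera_rays  # stacked (origin, direction) pairs per pixel
    print(rays.shape)  # torch.Size([1, 81920, 2, 3]) == (1, 20 * 64 * 64, 2, 3)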
| 306 | 1 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase__ :Optional[Any] = logging.get_logger(__name__)
# General docstring
lowercase__ :Dict = "RegNetConfig"
# Base docstring
lowercase__ :List[str] = "facebook/regnet-y-040"
lowercase__ :Optional[Any] = [1, 1088, 7, 7]
# Image classification docstring
lowercase__ :int = "facebook/regnet-y-040"
lowercase__ :Tuple = "tabby, tabby cat"
lowercase__ :Dict = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ = 3 ,A__ = 1 ,A__ = 1 ,A__ = "relu" ,**A__ ,):
super().__init__(**A__)
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
lowercase = tf.keras.layers.ConvaD(
filters=A__ ,kernel_size=A__ ,strides=A__ ,padding='''VALID''' ,groups=A__ ,use_bias=A__ ,name='''convolution''' ,)
lowercase = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='''normalization''')
lowercase = ACTaFN[activation] if activation is not None else tf.identity
def A__ ( self ,A__):
lowercase = self.convolution(self.padding(A__))
lowercase = self.normalization(A__)
lowercase = self.activation(A__)
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,**A__):
super().__init__(**A__)
lowercase = config.num_channels
lowercase = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,)
def A__ ( self ,A__):
lowercase = shape_list(A__)[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase = tf.transpose(A__ ,perm=(0, 2, 3, 1))
lowercase = self.embedder(A__)
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ = 2 ,**A__):
super().__init__(**A__)
lowercase = tf.keras.layers.ConvaD(
filters=A__ ,kernel_size=1 ,strides=A__ ,use_bias=A__ ,name='''convolution''')
lowercase = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='''normalization''')
def A__ ( self ,A__ ,A__ = False):
return self.normalization(self.convolution(A__) ,training=A__)
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ ,**A__):
super().__init__(**A__)
lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A__ ,name='''pooler''')
lowercase = [
tf.keras.layers.ConvaD(filters=A__ ,kernel_size=1 ,activation='''relu''' ,name='''attention.0'''),
tf.keras.layers.ConvaD(filters=A__ ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2'''),
]
def A__ ( self ,A__):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase = self.pooler(A__)
for layer_module in self.attention:
lowercase = layer_module(A__)
lowercase = hidden_state * pooled
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ ,A__ ,A__ = 1 ,**A__):
super().__init__(**A__)
lowercase = in_channels != out_channels or stride != 1
lowercase = max(1 ,out_channels // config.groups_width)
lowercase = (
TFRegNetShortCut(A__ ,stride=A__ ,name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''')
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase = [
TFRegNetConvLayer(A__ ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0'''),
TFRegNetConvLayer(
A__ ,stride=A__ ,groups=A__ ,activation=config.hidden_act ,name='''layer.1'''),
TFRegNetConvLayer(A__ ,kernel_size=1 ,activation=A__ ,name='''layer.2'''),
]
lowercase = ACTaFN[config.hidden_act]
def A__ ( self ,A__):
lowercase = hidden_state
for layer_module in self.layers:
lowercase = layer_module(A__)
lowercase = self.shortcut(A__)
hidden_state += residual
lowercase = self.activation(A__)
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ ,A__ ,A__ = 1 ,**A__):
super().__init__(**A__)
lowercase = in_channels != out_channels or stride != 1
lowercase = max(1 ,out_channels // config.groups_width)
lowercase = (
TFRegNetShortCut(A__ ,stride=A__ ,name='''shortcut''')
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' ,name='''shortcut''')
)
lowercase = [
TFRegNetConvLayer(A__ ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0'''),
TFRegNetConvLayer(
A__ ,stride=A__ ,groups=A__ ,activation=config.hidden_act ,name='''layer.1'''),
TFRegNetSELayer(A__ ,reduced_channels=int(round(in_channels / 4)) ,name='''layer.2'''),
TFRegNetConvLayer(A__ ,kernel_size=1 ,activation=A__ ,name='''layer.3'''),
]
lowercase = ACTaFN[config.hidden_act]
def A__ ( self ,A__):
lowercase = hidden_state
for layer_module in self.layers:
lowercase = layer_module(A__)
lowercase = self.shortcut(A__)
hidden_state += residual
lowercase = self.activation(A__)
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,A__ ,A__ ,A__ = 2 ,A__ = 2 ,**A__):
super().__init__(**A__)
lowercase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase = [
# downsampling is done in the first layer with stride of 2
layer(A__ ,A__ ,A__ ,stride=A__ ,name='''layers.0'''),
*[layer(A__ ,A__ ,A__ ,name=f'layers.{i+1}') for i in range(depth - 1)],
]
def A__ ( self ,A__):
for layer_module in self.layers:
lowercase = layer_module(A__)
return hidden_state
class lowercase ( tf.keras.layers.Layer ):
def __init__( self ,A__ ,**A__):
super().__init__(**A__)
lowercase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,))
lowercase = zip(config.hidden_sizes ,config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(A__ ,config.depths[1:])):
self.stages.append(TFRegNetStage(A__ ,A__ ,A__ ,depth=A__ ,name=f'stages.{i+1}'))
def A__ ( self ,A__ ,A__ = False ,A__ = True):
lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
lowercase = stage_module(A__)
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=A__ ,hidden_states=A__)
@keras_serializable
class lowercase ( tf.keras.layers.Layer ):
lowercase_ : int =RegNetConfig
def __init__( self ,A__ ,**A__):
super().__init__(**A__)
lowercase = config
lowercase = TFRegNetEmbeddings(A__ ,name='''embedder''')
lowercase = TFRegNetEncoder(A__ ,name='''encoder''')
lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A__ ,name='''pooler''')
@unpack_inputs
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = False ,):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.embedder(A__ ,training=A__)
lowercase = self.encoder(
A__ ,output_hidden_states=A__ ,return_dict=A__ ,training=A__)
lowercase = encoder_outputs[0]
lowercase = self.pooler(A__)
# Change to NCHW output format have uniformity in the modules
lowercase = tf.transpose(A__ ,perm=(0, 3, 1, 2))
lowercase = tf.transpose(A__ ,perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase = tuple([tf.transpose(A__ ,perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A__ ,pooler_output=A__ ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : List[str] =RegNetConfig
lowercase_ : Any ='''regnet'''
lowercase_ : str ='''pixel_values'''
@property
def A__ ( self):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa)}
lowercase__ :List[str] = r"""
    Parameters:
        This model is a Tensorflow
        [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
        regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
        behavior.
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase__ :List[Any] = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE__ , )
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,*A__ ,**A__):
super().__init__(A__ ,*A__ ,**A__)
lowercase = TFRegNetMainLayer(A__ ,name='''regnet''')
@unpack_inputs
@add_start_docstrings_to_model_forward(A__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A__ ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__=False ,):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.regnet(
pixel_values=A__ ,output_hidden_states=A__ ,return_dict=A__ ,training=A__ ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , SCREAMING_SNAKE_CASE__ , )
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,*A__ ,**A__):
super().__init__(A__ ,*A__ ,**A__)
lowercase = config.num_labels
lowercase = TFRegNetMainLayer(A__ ,name='''regnet''')
# classification head
lowercase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''') if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def A__ ( self ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__=False ,):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.regnet(
A__ ,output_hidden_states=A__ ,return_dict=A__ ,training=A__)
lowercase = outputs.pooler_output if return_dict else outputs[1]
lowercase = self.classifier[0](A__)
lowercase = self.classifier[1](A__)
lowercase = None if labels is None else self.hf_compute_loss(labels=A__ ,logits=A__)
if not return_dict:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A__ ,logits=A__ ,hidden_states=outputs.hidden_states)
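
# Usage sketch (illustrative; upstream these classes are RegNetConfig and
# TFRegNetForImageClassification, while the model class name in this sample is
# obfuscated):
def _tf_regnet_demo() -> None:
    from transformers import RegNetConfig, TFRegNetForImageClassification

    config = RegNetConfig(num_labels=10)
    model = TFRegNetForImageClassification(config)
    pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
    outputs = model(pixel_values)
    print(outputs.logits.shape)  # (1, 10)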
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ :str = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :str = ["ViTFeatureExtractor"]
lowercase__ :int = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :int = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Any = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
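
# What the lazy pattern above buys (illustrative sketch; names follow the
# upstream transformers layout): nothing heavy is imported until an attribute
# of the module is first accessed.
def _lazy_import_demo() -> None:
    import importlib

    vit = importlib.import_module("transformers.models.vit")
    print(type(vit).__name__)  # _LazyModule, the placeholder installed above
    config = vit.ViTConfig()   # first attribute access performs the real import
    print(config.hidden_size)  # 768 by default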
| 97 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__a :Dict = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 312 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__a :Optional[Any] = logging.get_logger(__name__)
__a :Dict[Optional[str], Type[Formatter]] = {}
__a :Dict[Optional[str], str] = {}
__a :Dict[Optional[str], Exception] = {}
def __snake_case ( __UpperCamelCase : type ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ,):
"""simple docstring"""
A_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
A_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
A_ = format_type
def __snake_case ( __UpperCamelCase : Exception ,__UpperCamelCase : Optional[str] ,__UpperCamelCase : Optional[List[str]] = None ):
"""simple docstring"""
A_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
A_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
__a :List[Any] = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
__a :List[str] = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
__a :Tuple = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
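
# For context (illustrative, not part of this module): the registry populated
# above is what resolves the format name passed to the public dataset API.
def _formatting_demo() -> None:
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [1, 2, 3]}).with_format("numpy")  # alias "np" also works
    print(type(ds[0]["x"]))  # <class 'numpy.int64'>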
def __snake_case ( __UpperCamelCase : Optional[str] ):
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __snake_case ( __UpperCamelCase : Optional[str] ,**__UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = get_format_type_from_alias(__UpperCamelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**__UpperCamelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
        f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 312 | 1 |
def solution() -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
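
def solution_fast(total: int = 1_000) -> int:
    # Alternative sketch (not part of the original sample). Project Euler 9 asks
    # for a * b * c with a + b + c = total and a^2 + b^2 = c^2. Substituting
    # c = total - a - b and solving for b gives
    # b = (total^2 - 2 * total * a) / (2 * (total - a)), so one loop suffices.
    for a in range(1, total):
        numerator = total * total - 2 * total * a
        denominator = 2 * (total - a)
        if numerator % denominator == 0:
            b = numerator // denominator
            if b > a:
                return a * b * (total - a - b)
    raise ValueError("no Pythagorean triple sums to the given total")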
if __name__ == "__main__":
print(F"""{solution() = }""") | 307 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 307 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """falcon"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
def __init__( self , lowercase_=6_5024 , lowercase_=4544 , lowercase_=32 , lowercase_=71 , lowercase_=1E-5 , lowercase_=0.02 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=None , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=11 , lowercase_=11 , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ : Dict = kwargs.pop("n_embed" , lowercase_ )
UpperCAmelCase_ : Any = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : List[Any] = hidden_dropout
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : List[Any] = bos_token_id
UpperCAmelCase_ : Tuple = eos_token_id
UpperCAmelCase_ : List[str] = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCAmelCase_ : Any = alibi
UpperCAmelCase_ : Tuple = new_decoder_architecture
UpperCAmelCase_ : Union[str, Any] = multi_query # Ignored when new_decoder_architecture is True
UpperCAmelCase_ : List[str] = parallel_attn
UpperCAmelCase_ : int = bias
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return not self.alibi
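
# Usage sketch (illustrative, not part of the sample; upstream this class is
# transformers.FalconConfig, and the properties come from the code above):
def _falcon_config_demo() -> None:
    from transformers import FalconConfig

    config = FalconConfig(hidden_size=128, num_attention_heads=4, num_hidden_layers=2)
    print(config.head_dim)  # 128 // 4 == 32
    print(config.rotary)    # True, because alibi defaults to False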
| 61 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
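
def _check_implementations_agree() -> None:
    # Quick consistency sketch (not part of the original sample): the three
    # implementations above should agree on arbitrary integers.
    import random

    random.seed(0)
    for _ in range(100):
        n = random.randint(-(10**12), 10**12)
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)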
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 157 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""input_features"""]
def __init__( self , A=8_0 , A=1_6_0_0_0 , A=1_6_0 , A=3_0 , A=4_0_0 , A=0.0 , A=False , **A , ) -> List[str]:
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
snake_case : int = n_fft
snake_case : str = hop_length
snake_case : List[str] = chunk_length
snake_case : int = chunk_length * sampling_rate
snake_case : str = self.n_samples // hop_length
snake_case : Any = sampling_rate
snake_case : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=A , norm="""slaney""" , mel_scale="""slaney""" , )
def UpperCAmelCase ( self , A ) -> np.ndarray:
snake_case : Optional[Any] = spectrogram(
A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
snake_case : Any = log_spec[:, :-1]
snake_case : Optional[Any] = np.maximum(A , log_spec.max() - 8.0 )
snake_case : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase ( A , A , A = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
snake_case : Optional[Any] = np.array(A , np.intaa )
snake_case : Tuple = []
for vector, length in zip(A , attention_mask.sum(-1 ) ):
snake_case : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
snake_case : int = padding_value
normed_input_values.append(A )
else:
snake_case : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , A , A = True , A = None , A = None , A = None , A = "max_length" , A = None , A = None , A = None , **A , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case : List[Any] = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
snake_case : int = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
snake_case : Optional[Any] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case : int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case : str = [np.asarray([raw_speech] ).T]
snake_case : Optional[int] = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
snake_case : Optional[int] = self.pad(
A , padding=A , max_length=max_length if max_length else self.n_samples , truncation=A , pad_to_multiple_of=A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
snake_case : str = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
snake_case : List[str] = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
snake_case : List[str] = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
snake_case : Dict = [self._np_extract_fbank_features(A ) for waveform in input_features[0]]
if isinstance(input_features[0] , A ):
snake_case : str = [np.asarray(A , dtype=np.floataa ) for feature in input_features]
else:
snake_case : Tuple = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
snake_case : Union[str, Any] = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
snake_case : Tuple = padded_inputs.convert_to_tensors(A )
return padded_inputs
def UpperCAmelCase ( self ) -> Dict[str, Any]:
snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
snake_case : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
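
# Usage sketch (illustrative; upstream this class is WhisperFeatureExtractor,
# while the name in this sample is obfuscated): one second of silence at 16 kHz
# is padded to 30 s and mapped to an 80-bin log-mel spectrogram of 3000 frames.
def _feature_extractor_demo() -> None:
    from transformers import WhisperFeatureExtractor

    extractor = WhisperFeatureExtractor()
    audio = np.zeros(16_000, dtype=np.float32)
    features = extractor(audio, sampling_rate=16_000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)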
| 176 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """bart"""
_snake_case = ["""past_key_values"""]
_snake_case = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , A=5_0_2_6_5 , A=1_0_2_4 , A=1_2 , A=4_0_9_6 , A=1_6 , A=1_2 , A=4_0_9_6 , A=1_6 , A=0.0 , A=0.0 , A="gelu" , A=1_0_2_4 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0.0 , A=False , A=True , A=3 , A=1 , A=0 , A=2 , A=True , A=2 , A=2 , **A , ) -> Any:
snake_case : Optional[int] = vocab_size
snake_case : Union[str, Any] = max_position_embeddings
snake_case : List[str] = d_model
snake_case : List[Any] = encoder_ffn_dim
snake_case : Optional[Any] = encoder_layers
snake_case : Union[str, Any] = encoder_attention_heads
snake_case : str = decoder_ffn_dim
snake_case : Union[str, Any] = decoder_layers
snake_case : Any = decoder_attention_heads
snake_case : Union[str, Any] = dropout
snake_case : List[str] = attention_dropout
snake_case : List[Any] = activation_dropout
snake_case : Optional[int] = activation_function
snake_case : Union[str, Any] = init_std
snake_case : List[str] = encoder_layerdrop
snake_case : int = decoder_layerdrop
snake_case : str = classifier_dropout
snake_case : List[str] = use_cache
snake_case : Tuple = encoder_layers
snake_case : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=A , pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , A ):
snake_case : Any = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case : Tuple = {0: """batch"""}
snake_case : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
snake_case : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case , snake_case : List[Any] = self.num_layers
for i in range(A ):
snake_case : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
snake_case : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Any = super().outputs
else:
snake_case : Any = super(A , self ).outputs
if self.use_past:
snake_case , snake_case : Any = self.num_layers
for i in range(A ):
snake_case : Any = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
snake_case : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
# Generate decoder inputs
snake_case : Any = seq_length if not self.use_past else 1
snake_case : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
snake_case : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
snake_case : List[str] = dict(**A , **A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : Optional[int] = common_inputs["""input_ids"""].shape
snake_case : Any = common_inputs["""decoder_input_ids"""].shape[1]
snake_case , snake_case : Optional[Any] = self.num_attention_heads
snake_case : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Any = decoder_seq_length + 3
snake_case : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(A , A )] , dim=1 )
snake_case : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case , snake_case : Any = self.num_layers
snake_case : List[str] = min(A , A )
snake_case : Dict = max(A , A ) - min_num_layers
snake_case : List[str] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(A ):
common_inputs["past_key_values"].append(
(
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
) )
# TODO: test this.
snake_case : Tuple = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(A , A ):
common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) )
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
snake_case : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case : Optional[int] = seqlen + 2
snake_case , snake_case : Tuple = self.num_layers
snake_case , snake_case : Optional[Any] = self.num_attention_heads
snake_case : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Optional[Any] = common_inputs["""attention_mask"""].dtype
snake_case : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 )
snake_case : Union[str, Any] = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(A )
]
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : int = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : int = tokenizer.num_special_tokens_to_add(A )
snake_case : Tuple = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
snake_case : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case : str = dict(tokenizer(A , return_tensors=A ) )
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
elif self.task == "causal-lm":
snake_case : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
else:
snake_case : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
return common_inputs
def UpperCAmelCase ( self , A , A , A , A ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = super()._flatten_past_key_values_(A , A , A , A )
else:
snake_case : Union[str, Any] = super(A , self )._flatten_past_key_values_(
A , A , A , A )
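
# Usage sketch (illustrative; upstream names BartConfig / BartOnnxConfig are
# assumed, since the class names in this sample are obfuscated):
def _onnx_dummy_inputs_demo() -> None:
    from transformers import AutoTokenizer, BartConfig
    from transformers.models.bart import BartOnnxConfig

    tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
    onnx_config = BartOnnxConfig(BartConfig(), task="default")
    dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print(sorted(dummy))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids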
| 176 | 1 |
"""Binary tree traversals: recursive and iterative pre-, in-, post-order plus level order."""
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    """Interactively builds a binary tree in level order."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Tree construction ended without returning a root")


def pre_order(node: TreeNode) -> None:
    """Root -> left subtree -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left subtree -> root -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left subtree -> right subtree -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level-order traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume the current level
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:  # enqueue the next level
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means the current node doesn't have a left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # popping from stack2 yields the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 134 |
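A quick non-interactive sanity check for the traversal functions above, building a three-node tree by hand instead of calling the interactive build_tree:

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)

in_order(root)    # prints: 2,1,3,
print()
pre_order(root)   # prints: 1,2,3,
print()
post_order(root)  # prints: 2,3,1,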
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT MSN model; the defaults match the `sayakpaul/vit-msn-base` architecture."""

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 71 | 0 |
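A short usage sketch for the config above, assuming the class is importable from transformers; the patch count is derived here only for illustration:

config = ViTMSNConfig()  # defaults: 768 hidden size, 12 layers, 12 heads, 224px images, 16px patches
num_patches = (config.image_size // config.patch_size) ** 2
print(config.model_type, config.hidden_size, num_patches)  # vit_msn 768 196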
"""Convert ViT checkpoints trained with the DINO method."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the fused qkv matrix of each encoder layer into separate query, key and value projections
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# we verify the converted weights on an image of two cats from COCO
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copy/paste/tweak a DINO checkpoint's weights into our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)

    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 315 |
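A direct (non-CLI) usage sketch of the conversion entry point above; it pulls the DINO checkpoint from torch hub, so network access is needed, and the argument values shown are just the parser's defaults:

# Equivalent to running the script with --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16
convert_vit_checkpoint("dino_vitb16", "./dino_vitb16", base_model=True)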
import math


def is_prime(number: int) -> bool:
    """Checks whether `number` is prime using O(sqrt(n)) trial divisions."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Scans from `factor * value` for a prime (upward by default, downward if desc=True);
    if the starting value is itself prime, continues past it."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
| 315 | 1 |
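Quick checks for the two helpers above:

assert is_prime(2) and is_prime(13) and not is_prime(1) and not is_prime(100)
print(next_prime(14))             # 17: scans upward from 14
print(next_prime(14, desc=True))  # 13: scans downward when desc=True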
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """
    Returns the peak of a list that first strictly increases and then strictly
    decreases, using divide and conquer in O(log(n)) time.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 218 |
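Quick checks for the peak finder above; both inputs satisfy its increase-then-decrease precondition:

print(peak([1, 2, 3, 4, 5, 4, 3, 2, 1]))  # 5
print(peak([1, 10, 9, 8, 7, 6]))          # 10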
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, no edge connects two vertices of the same set.
def check_bipartite_dfs(graph):
    """2-colors the graph with a DFS, then verifies that no edge joins same-colored vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 218 | 1 |
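The printed result for the 4-cycle-plus-isolated-vertex graph above is True; an odd cycle is the canonical non-bipartite counterexample:

triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False: a 3-cycle cannot be 2-colored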
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )


@dataclass
class DataCollatorForWav2Vec2Pretraining:
    """Dynamically pads the received inputs and samples the masked time indices for pretraining."""

    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch


class Wav2Vec2PreTrainer(Trainer):
    """Subclassed Trainer that decays the gumbel softmax temperature during training."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Perform a training step on a batch of inputs."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 361 |
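The gumbel-softmax temperature in training_step above decays geometrically per update step and is floored at the configured minimum. A tiny standalone sketch of that schedule, using the script's default hyperparameters:

max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

for step in (0, 100_000, 1_000_000):
    temp = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp)
    print(step, round(temp, 4))  # 2.0 at step 0, ~1.2131 at 100k, floored at 0.5 by 1M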
"""simple docstring"""
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 108 | 0 |
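A usage sketch lifted from the metric's own docstring above; it downloads the wmt20-comet-da checkpoint on first use, so the scores shown in the comment are the documented ones, not re-verified here:

import datasets

comet_metric = datasets.load_metric("comet")
source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
print([round(v, 2) for v in results["scores"]])  # [0.19, 0.92] per the docstring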